From noreply at buildbot.pypy.org Fri Aug 1 03:15:45 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Fri, 1 Aug 2014 03:15:45 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: redo 7e04e788d910 without breaking
translation
Message-ID: <20140801011545.568521C09B2@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey For each step, we estimated the time that it would take to complete for an
experienced developer who is already familiar with the PyPy codebase. From
-this number, the money is calculated considering a hourly rate of $60, and a
+this number, the money is calculated considering an hourly rate of $60, and a
5% general donation which goes to the Software Freedom Conservancy itself, the non-profit
organization of which the PyPy project is a member and which manages all the
issues related to donations, payments, and tax-exempt status. As with all speed improvements, it's relatively hard to predict exactly
-how it'll cope, however we expect the results to be withing an order
+how it'll cope, however we expect the results to be within an order
of magnitude of handwritten C equivalent. Estimated costs: USD$30,000. Estimated duration: 3 months.])
Message-ID: <20140805085953.3F6F61C0588@cobra.cs.uni-duesseldorf.de>
Author: Tyler Wade
])
diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py
--- a/rpython/rtyper/lltypesystem/rstr.py
+++ b/rpython/rtyper/lltypesystem/rstr.py
@@ -820,6 +820,32 @@
i += 1
return result
+ def ll_join_chars_with_str(s, length, chars):
+ s_chars = s.chars
+ s_len = len(s_chars)
+ num_chars = length
+ if num_chars == 0:
+ return s.empty()
+
+ try:
+ seplen = ovfcheck(s_len * (num_chars - 1))
+ except OverflowError:
+ raise MemoryError
+
+ # a single '+' at the end is allowed to overflow: it gets
+ # a negative result, and the gc will complain
+ result = s.malloc(num_chars + seplen)
+ res_index = 1
+ result.chars[0] = chars[0]
+ i = 1
+ while i < num_chars:
+ s.copy_contents(s, result, 0, res_index, s_len)
+ res_index += s_len
+ result.chars[res_index] = chars[i]
+ res_index += 1
+ i += 1
+ return result
+
@jit.oopspec('stroruni.slice(s1, start, stop)')
@signature(types.any(), types.int(), types.int(), returns=types.any())
@jit.elidable
diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py
--- a/rpython/rtyper/rstr.py
+++ b/rpython/rtyper/rstr.py
@@ -235,6 +235,8 @@
else:
if r_lst.item_repr == rstr.repr:
llfn = self.ll.ll_join
+ elif r_lst.item_repr == char_repr:
+ llfn = self.ll.ll_join_chars_with_str
else:
raise TyperError("sep.join() of non-string list: %r" % r_lst)
return hop.gendirectcall(llfn, v_str, v_length, v_items)
diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py
--- a/rpython/rtyper/test/test_rstr.py
+++ b/rpython/rtyper/test/test_rstr.py
@@ -516,6 +516,9 @@
res = self.interpret(lambda: const('.').join([const('abc'), const('def')]), [])
assert self.ll_to_string(res) == const('abc.def')
+ res = self.interpret(lambda: const(' ').join([const('a'), const('b'), const('c')]), [])
+ assert self.ll_to_string(res) == const('a b c')
+
def fn(i, j):
s1 = [ const(''), const(','), const(' and ')]
s2 = [ [], [const('foo')], [const('bar'), const('baz'), const('bazz')]]
From noreply at buildbot.pypy.org Tue Aug 5 10:59:54 2014
From: noreply at buildbot.pypy.org (waedt)
Date: Tue, 5 Aug 2014 10:59:54 +0200 (CEST)
Subject: [pypy-commit] pypy utf8-unicode2: Fixed translation
Message-ID: <20140805085954.853BB1C0588@cobra.cs.uni-duesseldorf.de>
Author: Tyler Wade
i", 0) # pad
- + pack(">q", 0) # 8 not fullscreen
- + pack("
i", 0) # pad
+ + pack(">q", 0) # 8 not fullscreen
+ + pack("
Author: Armin Rigo
About estimates and costs
The PyPy project is in a unique position in that it could support -Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of of code base and fully +Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of the code base and fully reusing its unique translation and JIT-Compiler technologies. However, it requires a lot of work, and it will take a long time before we can complete a Python 3 port if we only wait for volunteer @@ -115,7 +115,7 @@
For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From -this number, the money is calculated considering a hourly rate of $60, and a +this number, the money is calculated considering an hourly rate of $60, and a 5% general donation which goes to the Software Freedom Conservancy itself, the non-profit association of which the PyPy project is a member and which manages all the issues related to donations, taxes and payments.
@@ -255,7 +255,7 @@ some attention towards implementing Python 3. This will not hinder other directions in which PyPy is going like improving performance. The goal of the PyPy community is to support both Python 2 and Python 3 for the -forseeable future. +foreseeable future.PyPy's developers make all PyPy software available to the public without
charge, under PyPy's Open Source copyright license, the permissive MIT
License. PyPy's license assures that PyPy is equally available to
diff --git a/source/numpydonate.txt b/source/numpydonate.txt
--- a/source/numpydonate.txt
+++ b/source/numpydonate.txt
@@ -104,7 +104,7 @@
For each step, we estimated the time that it would take to complete for an
experienced developer who is already familiar with the PyPy codebase. From
-this number, the money is calculated considering a hourly rate of $60, and a
+this number, the money is calculated considering an hourly rate of $60, and a
5% general donation which goes to the `Software Freedom Conservancy`_ itself, the non-profit
organization of which the PyPy project is a member and which manages all the
issues related to donations, payments, and tax-exempt status.
@@ -145,7 +145,7 @@
for tight loops
As with all speed improvements, it's relatively hard to predict exactly
- how it'll cope, however we expect the results to be withing an order
+ how it'll cope, however we expect the results to be within an order
of magnitude of handwritten C equivalent.
Estimated costs: USD$30,000. Estimated duration: 3 months.
diff --git a/source/py3donate.txt b/source/py3donate.txt
--- a/source/py3donate.txt
+++ b/source/py3donate.txt
@@ -25,7 +25,7 @@
harder for everyone.
The PyPy project is in a unique position in that it could support
-Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of of code base and fully
+Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of the code base and fully
reusing its unique translation and JIT-Compiler technologies.
However, it requires a lot of work, and it will take a long time
before we can complete a Python 3 port if we only wait for volunteer
@@ -89,7 +89,7 @@
For each step, we estimated the time that it would take to complete for an
experienced developer who is already familiar with the PyPy codebase. From
-this number, the money is calculated considering a hourly rate of $60, and a
+this number, the money is calculated considering an hourly rate of $60, and a
5% general donation which goes to the `Software Freedom Conservancy`_ itself, the non-profit
association of which the PyPy project is a member and which manages all the
issues related to donations, taxes and payments.
@@ -271,7 +271,7 @@
some attention towards implementing Python 3. This will not hinder other
directions in which PyPy is going like improving performance. The goal
of the PyPy community is to support both Python 2 and Python 3 for the
-forseeable future.
+foreseeable future.
PyPy's developers make all PyPy software available to the public without
charge, under PyPy's Open Source copyright license, the permissive MIT
From noreply at buildbot.pypy.org Sat Aug 9 01:02:17 2014
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sat, 9 Aug 2014 01:02:17 +0200 (CEST)
Subject: [pypy-commit] pypy.org extradoc: merged upstream
Message-ID: <20140808230217.557851C3352@cobra.cs.uni-duesseldorf.de>
Author: Alex Gaynor
The current status of PyPy, with a particular focus on what happened in +the last two years, since the last EuroPython PyPy talk. We will give a +brief overview of the current speed and the on-going development efforts +on the JIT, the GC, NumPy, Python 3 compatibility, CFFI, STM...
+In this talk we will present the current status of PyPy, with a +particular focus on what happened in the last two years, since the last +EuroPython PyPy talk. We will give an overview of the current speed and +the on-going development efforts, including but not limited to:
+This is the "general PyPy status talk" that we give every year at +EuroPython (except last year; hence the "no no, PyPy is not dead" part +of the title of this talk).
+PyPy, the Python implementation written in Python, experimentally +supports Transactional Memory (TM). The strength of TM is to enable a +novel use of multithreading, inherently safe, and not limited to +special use cases like other approaches. This talk will focus on how it +works under the hood.
+PyPy is a fast alternative Python implementation. Software +Transactional Memory (STM) is a current academic research topic. Put +the two together --brew for a couple of years-- and we get a version of +PyPy that runs on multiple cores, without the infamous Global +Interpreter Lock (GIL).
+The current research is based on a recent new insight that promises to +give really good performance. The speed of STM is generally measured by +two factors: the ability to scale with the number of CPUs, and the +amount of overhead when compared with other approaches in a single CPU +(in this case, with the regular PyPy with the GIL). Scaling is not +really a problem here, but single-CPU performance is --or used to be. +This new approach gives a single-threaded overhead that should be very +low, maybe 20%, which would definitely be news for STM systems. Right +now (February 2014) we are still implementing it, so we cannot give +final numbers yet, but early results on a small interpreter for a custom +language are around 15%. This looks like a deal-changer for STM.
+In the talk, I will describe our progress, hopefully along with real +numbers and demos. I will then dive under the hood of PyPy to give an +idea about how it works. I will conclude with a picture of how the +future of multi-threaded programming might look like, for high-level +languages like Python. I will also mention CPython: how hard (or not) +it would be to change the CPython source code to use the same approach.
+All modules that are pure python in CPython of course work.
+Numpy support is not complete. We maintain our own fork of numpy for now, further instructions can be found at https://bitbucket.org/pypy/numpy.git.
Python libraries known to work under PyPy (the list is not exhaustive). A community maintained compatibility wiki is hosted on bitbucket:
Branch: gc-incminimark-pinning
Changeset: r72762:6b1a2dd2d891
Date: 2014-08-12 13:15 +0200
http://bitbucket.org/pypy/pypy/changeset/6b1a2dd2d891/
Log: only run assert if no pinned objects are around
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2026,8 +2026,13 @@
debug_start("gc-collect-step")
debug_print("starting gc state: ", GC_STATES[self.gc_state])
# Debugging checks
- ll_assert(self.nursery_free == self.nursery,
- "nursery not empty in major_collection_step()")
+ if self.pinned_objects_in_nursery == 0:
+ ll_assert(self.nursery_free == self.nursery,
+ "nursery not empty in major_collection_step()")
+ else:
+ # XXX try to add some similar check to the above one for the case
+ # that the nursery still contains some pinned objects (groggi)
+ pass
self.debug_check_consistency()
From noreply at buildbot.pypy.org Tue Aug 12 13:51:02 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 12 Aug 2014 13:51:02 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: old objects that no
longer point to a pinned one are removed from the internal list
Message-ID: <20140812115102.72E281C03AC@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72763:0645fc78c5e0
Date: 2014-08-12 13:16 +0200
http://bitbucket.org/pypy/pypy/changeset/0645fc78c5e0/
Log: old objects that no longer point to a pinned one are removed from
the internal list
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1569,8 +1569,18 @@
# objects. This way we populate 'surviving_pinned_objects'
# with pinned object that are (only) visible from an old
# object.
- self.old_objects_pointing_to_pinned.foreach(
+ # Additionally we create a new list as it may be that an old object
+ # no longer points to a pinned one and we want to remove them from
+ # the list.
+ if self.old_objects_pointing_to_pinned.non_empty():
+ current_old_objects_pointing_to_pinned = \
+ self.old_objects_pointing_to_pinned
+ #
+ self.old_objects_pointing_to_pinned = self.AddressStack()
+ # visit the ones we know of
+ current_old_objects_pointing_to_pinned.foreach(
self._visit_old_objects_pointing_to_pinned, None)
+ current_old_objects_pointing_to_pinned.delete()
#
while True:
# If we are using card marking, do a partial trace of the arrays
@@ -1684,7 +1694,7 @@
debug_stop("gc-minor")
def _visit_old_objects_pointing_to_pinned(self, obj, ignore):
- self.trace(obj, self._trace_drag_out, llmemory.NULL)
+ self.trace(obj, self._trace_drag_out, obj)
def collect_roots_in_nursery(self):
# we don't need to trace prebuilt GcStructs during a minor collect:
From noreply at buildbot.pypy.org Tue Aug 12 13:51:03 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 12 Aug 2014 13:51:03 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: wip: rewriting object
pinning tests. they were a mess.
Message-ID: <20140812115103.B09461C03AC@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72764:d1cb742ced7f
Date: 2014-08-12 13:48 +0200
http://bitbucket.org/pypy/pypy/changeset/d1cb742ced7f/
Log: wip: rewriting object pinning tests. they were a mess.
These new tests, which are based on the old tests, already discovered
two overlooked problems. The new tests do check the state of the GC
more thoroughly.
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py
--- a/rpython/memory/gc/test/test_object_pinning.py
+++ b/rpython/memory/gc/test/test_object_pinning.py
@@ -45,328 +45,339 @@
class TestIncminimark(PinningGCTest):
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
+ from rpython.memory.gc.incminimark import STATE_SCANNING
- def simple_pin_stack(self, collect_func):
- # create object, pin it and point from stackroots to it
+ def pinned_obj_in_stackroot(self, collect_func):
+ # scenario: a pinned object that is part of the stack roots. Check if
+ # it is not moved
+ #
ptr = self.malloc(S)
ptr.someInt = 100
self.stackroots.append(ptr)
+ assert self.stackroots[0] == ptr # validate our assumption
+
+ adr = llmemory.cast_ptr_to_adr(ptr)
+ assert self.gc.is_in_nursery(adr) # to be sure
+ assert self.gc.pin(adr)
+ #
+ # the object shouldn't move from now on
+ collect_func()
+ #
+ # check if it is still at the same location as expected
+ adr_after_collect = llmemory.cast_ptr_to_adr(self.stackroots[0])
+ assert self.gc.is_in_nursery(adr_after_collect)
+ assert adr == adr_after_collect
+ assert self.gc._is_pinned(adr)
+ assert ptr.someInt == 100
+ assert self.gc.pinned_objects_in_nursery == 1
+
+ def test_pinned_obj_in_stackroot_minor_collection(self):
+ self.pinned_obj_in_stackroot(self.gc.minor_collection)
+
+ def test_pinned_obj_in_stackroot_full_major_collection(self):
+ self.pinned_obj_in_stackroot(self.gc.collect)
+
+ def test_pinned_obj_in_stackroots_stepwise_major_collection(self):
+ # scenario: same as for 'pinned_obj_in_stackroot' with minor change
+ # that we do stepwise major collection and check in each step for
+ # a correct state
+ #
+ ptr = self.malloc(S)
+ ptr.someInt = 100
+ self.stackroots.append(ptr)
+ assert self.stackroots[0] == ptr # validate our assumption
+
+ adr = llmemory.cast_ptr_to_adr(ptr)
+ assert self.gc.is_in_nursery(adr)
+ assert self.gc.pin(adr)
+ #
+ # the object shouldn't move from now on. Do a full round of major
+ # steps and check each time for correct state
+ #
+ # check that we start at the expected point
+ assert self.gc.gc_state == self.STATE_SCANNING
+ done = False
+ while not done:
+ self.gc.debug_gc_step()
+ # check that the pinned object didn't move
+ ptr_after_collection = self.stackroots[0]
+ adr_after_collection = llmemory.cast_ptr_to_adr(ptr_after_collection)
+ assert self.gc.is_in_nursery(adr_after_collection)
+ assert adr == adr_after_collection
+ assert self.gc._is_pinned(adr)
+ assert ptr.someInt == 100
+ assert self.gc.pinned_objects_in_nursery == 1
+ # as the object is referenced from the stackroots, the gc internal
+ # 'old_objects_pointing_to_pinned' should be empty
+ assert not self.gc.old_objects_pointing_to_pinned.non_empty()
+ #
+ # break condition
+ done = self.gc.gc_state == self.STATE_SCANNING
+
+
+ def pin_unpin_moved_stackroot(self, collect_func):
+ # scenario: test if the pinned object is moved after being unpinned.
+ # the second part of the scenario is the tested one. The first part
+ # is already tested by other tests.
+ ptr = self.malloc(S)
+ ptr.someInt = 100
+ self.stackroots.append(ptr)
+ assert self.stackroots[0] == ptr # validate our assumption
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.pin(adr)
collect_func()
+ #
+ # from here on the test really starts. previous logic is already tested
+ #
+ self.gc.unpin(adr)
+ assert not self.gc._is_pinned(adr)
+ assert self.gc.is_in_nursery(adr)
+ #
+ # now we do another collection and the object should be moved out of
+ # the nursery.
+ collect_func()
+ new_adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
+ assert not self.gc.is_in_nursery(new_adr)
+ assert self.stackroots[0].someInt == 100
+ with py.test.raises(RuntimeError) as exinfo:
+ ptr.someInt = 200
+ assert "freed" in str(exinfo.value)
+
+ def test_pin_unpin_moved_stackroot_minor_collection(self):
+ self.pin_unpin_moved_stackroot(self.gc.minor_collection)
+
+ def test_pin_unpin_moved_stackroot_major_collection(self):
+ self.pin_unpin_moved_stackroot(self.gc.collect)
+
+
+ def pin_referenced_from_old(self, collect_func):
+ # scenario: an old object points to a pinned one. Check if the pinned
+ # object is correctly kept in the nursery and not moved.
+ #
+ # create old object
+ old_ptr = self.malloc(S)
+ old_ptr.someInt = 900
+ self.stackroots.append(old_ptr)
+ assert self.stackroots[0] == old_ptr # validate our assumption
+ collect_func() # make it old: move it out of the nursery
+ old_ptr = self.stackroots[0]
+ assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
+ #
+ # create young pinned one and let the old one reference the young one
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ self.write(old_ptr, 'next', pinned_ptr)
+ pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
+ assert self.gc.pin(pinned_adr)
+ assert self.gc.is_in_nursery(pinned_adr)
+ assert old_ptr.next.someInt == 100
+ assert self.gc.pinned_objects_in_nursery == 1
+ #
+ # do a collection run and make sure the pinned one didn't move
+ collect_func()
+ assert old_ptr.next.someInt == pinned_ptr.someInt == 100
+ assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr
+ assert self.gc.is_in_nursery(pinned_adr)
- assert self.gc.is_in_nursery(adr)
- assert ptr.someInt == 100
+ def test_pin_referenced_from_old_minor_collection(self):
+ self.pin_referenced_from_old(self.gc.minor_collection)
- def test_simple_pin_stack_full_collect(self):
- self.simple_pin_stack(self.gc.collect)
+ def test_pin_referenced_from_old_major_collection(self):
+ self.pin_referenced_from_old(self.gc.collect)
- def test_simple_pin_stack_minor_collect(self):
- self.simple_pin_stack(self.gc.minor_collection)
+ def test_pin_referenced_from_old_stepwise_major_collection(self):
+ # scenario: same as in 'pin_referenced_from_old'. However,
+ # this time we do a major collection step by step and check
+ # between steps that the states are as expected.
+ #
+ # create old object
+ old_ptr = self.malloc(S)
+ old_ptr.someInt = 900
+ self.stackroots.append(old_ptr)
+ assert self.stackroots[0] == old_ptr # validate our assumption
+ self.gc.minor_collection() # make it old: move it out of the nursery
+ old_ptr = self.stackroots[0]
+ old_adr = llmemory.cast_ptr_to_adr(old_ptr)
+ assert not self.gc.is_in_nursery(old_adr)
+ #
+ # create young pinned one and let the old one reference the young one
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ self.write(old_ptr, 'next', pinned_ptr)
+ pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
+ assert self.gc.pin(pinned_adr)
+ assert self.gc.is_in_nursery(pinned_adr)
+ assert old_ptr.next.someInt == 100
+ assert self.gc.pinned_objects_in_nursery == 1
+ #
+ # stepwise major collection with validation between steps
+ # check that we start at the expected point
+ assert self.gc.gc_state == self.STATE_SCANNING
+ done = False
+ while not done:
+ self.gc.debug_gc_step()
+ #
+ # make sure pinned object didn't move
+ assert old_ptr.next.someInt == pinned_ptr.someInt == 100
+ assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr
+ assert self.gc.is_in_nursery(pinned_adr)
+ assert self.gc.pinned_objects_in_nursery == 1
+ #
+ # validate that the old object is part of the internal list
+ # 'old_objects_pointing_to_pinned' as expected.
+ should_be_old_adr = self.gc.old_objects_pointing_to_pinned.pop()
+ assert should_be_old_adr == old_adr
+ self.gc.old_objects_pointing_to_pinned.append(should_be_old_adr)
+ #
+ # break condition
+ done = self.gc.gc_state == self.STATE_SCANNING
- def simple_pin_unpin_stack(self, collect_func):
- ptr = self.malloc(S)
- ptr.someInt = 100
-
- self.stackroots.append(ptr)
-
- adr = llmemory.cast_ptr_to_adr(ptr)
- assert self.gc.pin(adr)
-
+
+ def pin_referenced_from_old_remove_ref(self, collect_func):
+ # scenario: an old object points to a pinned one. We remove the
+ # reference from the old one. So nothing points to the pinned object.
+ # After this the pinned object should be collected (it's dead).
+ #
+ # Create the objects and get them to our initial state (this is not
+ # tested here, should be already tested by other tests)
+ old_ptr = self.malloc(S)
+ old_ptr.someInt = 900
+ self.stackroots.append(old_ptr)
+ assert self.stackroots[0] == old_ptr # check assumption
+ collect_func() # make it old
+ old_ptr = self.stackroots[0]
+ #
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ self.write(old_ptr, 'next', pinned_ptr)
+ pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
+ assert self.gc.pin(pinned_adr)
+ #
collect_func()
+ # from here on we have our initial state for this test.
+ #
+ # first check some basic assumptions.
+ assert self.gc.is_in_nursery(pinned_adr)
+ assert self.gc._is_pinned(pinned_adr)
+ # remove the reference
+ self.write(old_ptr, 'next', lltype.nullptr(S))
+ # from now on the pinned object is dead. Do a collection and make sure
+ # old object still there and the pinned one is gone.
+ collect_func()
+ assert self.stackroots[0].someInt == 900
+ assert not self.gc.old_objects_pointing_to_pinned.non_empty()
+ with py.test.raises(RuntimeError) as exinfo:
+ pinned_ptr.someInt = 200
+ assert "freed" in str(exinfo.value)
- assert self.gc.is_in_nursery(adr)
- assert ptr.someInt == 100
-
- # unpin and check if object is gone from nursery
- self.gc.unpin(adr)
+ def test_pin_referenced_from_old_remove_ref_minor_collection(self):
+ self.pin_referenced_from_old_remove_ref(self.gc.minor_collection)
+
+ def test_pin_referenced_from_old_remove_ref_major_collection(self):
+ self.pin_referenced_from_old_remove_ref(self.gc.collect)
+
+
+ def pin_referenced_from_old_remove_old(self, collect_func):
+ # scenario: an old object referenced a pinned object. After removing
+ # the stackroot reference to the old object, both objects (old and pinned)
+ # must be collected.
+ # This test is important as we expect unreachable pinned objects to
+ # be collected. At the same time we have an internal list of objects
+ # pointing to pinned ones and we must make sure that because of it the
+ # old/pinned object survive.
+ #
+ # create the objects and get them to the initial state for this test.
+ # Everything on the way to the initial state should be covered by
+ # other tests.
+ old_ptr = self.malloc(S)
+ old_ptr.someInt = 900
+ self.stackroots.append(old_ptr)
collect_func()
- try:
- assert ptr.someInt == 100
- assert False
- except RuntimeError as ex:
- assert "freed" in str(ex)
-
- # check if we object is still accessible
- ptr_old = self.stackroots[0]
- assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(ptr_old))
- assert ptr_old.someInt == 100
+ old_ptr = self.stackroots[0]
+ #
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ self.write(old_ptr, 'next', pinned_ptr)
+ assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
+ #
+ collect_func()
+ #
+ # now we have our initial state: old object referenced from stackroots.
+ # Old object referencing a young pinned one. Next step is to make some
+ # basic checks that we got the expected state.
+ assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
+ assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
+ assert pinned_ptr == old_ptr.next
+ #
+ # now we remove the old object from the stackroots...
+ self.stackroots.remove(old_ptr)
+ # ... and do a major collection (otherwise the old object wouldn't be
+ # gone).
+ self.gc.collect()
+ # check that both objects are gone
+ assert not self.gc.old_objects_pointing_to_pinned.non_empty()
+ with py.test.raises(RuntimeError) as exinfo_old:
+ old_ptr.someInt = 800
+ assert "freed" in str(exinfo_old.value)
+ #
+ with py.test.raises(RuntimeError) as exinfo_pinned:
+ pinned_ptr.someInt = 200
+ assert "freed" in str(exinfo_pinned.value)
- def test_simple_pin_unpin_stack_full_collect(self):
- self.simple_pin_unpin_stack(self.gc.collect)
+ def test_pin_referenced_from_old_remove_old_minor_collection(self):
+ self.pin_referenced_from_old_remove_old(self.gc.minor_collection)
- def test_simple_pin_unpin_stack_minor_collect(self):
- self.simple_pin_unpin_stack(self.gc.minor_collection)
+ def test_pin_referenced_from_old_remove_old_major_collection(self):
+ self.pin_referenced_from_old_remove_old(self.gc.collect)
- def test_pinned_obj_collected_after_old_object_collected(self):
+
+ def pin_referenced_from_young_in_stackroots(self, collect_func):
+ # scenario: a young object is referenced from the stackroots. This
+ # young object points to a young pinned object. We check if everything
+ # behaves as expected after a collection: the young object is moved out
+ # of the nursery while the pinned one stays where it is.
+ #
root_ptr = self.malloc(S)
- root_ptr.someInt = 999
+ root_ptr.someInt = 900
self.stackroots.append(root_ptr)
- self.gc.collect()
-
- root_ptr = self.stackroots[0]
- next_ptr = self.malloc(S)
- next_ptr.someInt = 111
- assert self.gc.pin(llmemory.cast_ptr_to_adr(next_ptr))
- self.write(root_ptr, 'next', next_ptr)
- self.gc.collect()
- # check still alive
- assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr.next))
- self.stackroots.remove(root_ptr)
- self.gc.collect()
- # root_ptr was collected and therefore also the pinned object should
- # be gone
- try:
- next_ptr.someInt = 101
- assert False
- except RuntimeError as ex:
- assert "freed" in str(ex)
-
- # XXX more tests like the one above. Make list of all possible cases and
- # write tests for each one. Also: minor/full major collection tests maybe
- # needed
-
- def test_pin_referenced_from_stackroot_young(self):
+ assert self.stackroots[0] == old_ptr # validate assumption
#
- # create two objects and reference the pinned one
- # from the one that will be moved out of the
- # nursery.
- root_ptr = self.malloc(S)
- next_ptr = self.malloc(S)
- self.write(root_ptr, 'next', next_ptr)
- self.stackroots.append(root_ptr)
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ self.write(root_ptr, 'next', pinned_ptr)
+ pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
+ assert self.gc.pin(pinned_adr)
+ # check both are in nursery
+ assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
+ assert self.gc.is_in_nursery(pinned_adr)
#
- next_ptr.someInt = 100
- root_ptr.someInt = 999
+ # no old object yet pointing to a pinned one
+ assert not self.gc.old_objects_pointing_to_pinned.non_empty()
#
- next_adr = llmemory.cast_ptr_to_adr(next_ptr)
- assert self.gc.pin(next_adr)
+ # now we do a collection and check if the result is as expected
+ collect_func()
#
- # in this step the 'root_ptr' object will be
- # outside the nursery, pointing to the still
- # young (because it's pinned) 'next_ptr'.
- self.gc.collect()
- #
+ # check if objects are where we expect them
root_ptr = self.stackroots[0]
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
- assert self.gc.is_in_nursery(next_adr)
- assert next_ptr.someInt == 100
- assert root_ptr.next == next_ptr
- #
- # now we remove the reference to the pinned object and do a collect
- # to check if the pinned object was removed from nursery.
- self.write(root_ptr, 'next', lltype.nullptr(S))
- self.gc.collect()
- try:
- # should fail as this was the pinned object that is now collected
- next_ptr.someInt = 0
- assert False
- except RuntimeError as ex:
- assert "freed" in str(ex)
+ assert self.gc.is_in_nursery(pinned_adr)
+ # and as 'root_ptr' object is now old, it should be tracked specially
+ should_be_root_ptr = self.gc.old_objects_pointing_to_pinned.pop()
+ assert should_be_root_ptr == root_ptr
+ self.gc.old_objects_pointing_to_pinned.push(should_be_root_ptr)
+ # check that old object still points to the pinned one as expected
+ assert root_ptr.next == pinned_ptr
- def test_old_points_to_pinned(self):
- # Test if we handle the case that an already old object can point
- # to a pinned object and keeps the pinned object alive by
- # that.
- #
- # create the old object that will point to a pinned object
- old_ptr = self.malloc(S)
- old_ptr.someInt = 999
- self.stackroots.append(old_ptr)
- self.gc.collect()
- assert not self.gc.is_in_nursery(
- llmemory.cast_ptr_to_adr(self.stackroots[0]))
- #
- # create the young pinned object and attach it to the old object
- pinned_ptr = self.malloc(S)
- pinned_ptr.someInt = 6
- assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
- self.write(self.stackroots[0], 'next', pinned_ptr)
- #
- # let's check if everything stays in place before/after a collection
- assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
- self.gc.collect()
- assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
- #
- self.stackroots[0].next.someInt = 100
- self.gc.collect()
- assert self.stackroots[0].next.someInt == 100
+ def test_pin_referenced_from_young_in_stackroots_minor_collection(self):
+ self.pin_referenced_from_young_in_stackroots(self.gc.minor_collection)
- def not_pinned_and_stackroots_point_to_pinned(self, make_old):
- # In this test case we point to a pinned object from an (old) object
- # *and* from the stackroots
- obj_ptr = self.malloc(S)
- obj_ptr.someInt = 999
- self.stackroots.append(obj_ptr)
- if make_old:
- self.gc.collect()
- obj_ptr = self.stackroots[0]
- assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(obj_ptr))
- else:
- assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(obj_ptr))
+ def test_pin_referenced_from_young_in_stackroots_major_collection(self):
+ self.pin_referenced_from_young_in_stackroots(self.gc.collect)
- pinned_ptr = self.malloc(S)
- pinned_ptr.someInt = 111
- assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
- self.stackroots.append(pinned_ptr)
- self.write(obj_ptr, 'next', pinned_ptr)
-
- self.gc.collect()
- # done with preparation. do some basic checks
- assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
- assert pinned_ptr.someInt == 111
- assert self.stackroots[0].next == pinned_ptr
-
- def test_old_and_stackroots_point_to_pinned(self):
- self.not_pinned_and_stackroots_point_to_pinned(make_old=True)
-
- def test_young_and_stackroots_point_to_pinned(self):
- self.not_pinned_and_stackroots_point_to_pinned(make_old=False)
-
- def test_old_points_to_old_points_to_pinned_1(self):
- #
- # Scenario:
- # stackroots points to 'root_ptr'. 'root_ptr' points to 'next_ptr'.
- # 'next_ptr' points to the young and pinned 'pinned_ptr'. Here we
- # remove the reference to 'next_ptr' from 'root_ptr' and check if it
- # behaves as expected.
- #
- root_ptr = self.malloc(S)
- root_ptr.someInt = 100
- self.stackroots.append(root_ptr)
- self.gc.collect()
- root_ptr = self.stackroots[0]
- #
- next_ptr = self.malloc(S)
- next_ptr.someInt = 200
- self.write(root_ptr, 'next', next_ptr)
- self.gc.collect()
- next_ptr = root_ptr.next
- #
- # check if everything is as expected
- assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
- assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(next_ptr))
- assert root_ptr.someInt == 100
- assert next_ptr.someInt == 200
- #
- pinned_ptr = self.malloc(S)
- pinned_ptr.someInt = 300
- assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
- self.write(next_ptr, 'next', pinned_ptr)
- self.gc.collect()
- #
- # validate everything is as expected with 3 rounds of GC collecting
- for _ in range(3):
- self.gc.collect()
- assert next_ptr.next == pinned_ptr
- assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
- assert pinned_ptr.someInt == 300
- assert root_ptr.someInt == 100
- assert next_ptr.someInt == 200
- #
- # remove the reference to the pinned object
- self.write(next_ptr, 'next', root_ptr)
- self.gc.minor_collection()
- # the minor collection visits all old objects pointing to pinned ones.
- # therefore the pinned object should be gone
- try:
- pinned_ptr.someInt == 300
- assert False
- except RuntimeError as ex:
- assert "freed" in str(ex)
-
- def test_old_points_to_old_points_to_pinned_2(self):
- #
- # Scenario:
- # stackroots points to 'root_ptr'. 'root_ptr' points to 'next_ptr'.
- # 'next_ptr' points to the young and pinned 'pinned_ptr'. Here we
- # remove 'root_ptr' from the stackroots and check if it behaves as
- # expected.
- #
- root_ptr = self.malloc(S)
- root_ptr.someInt = 100
- self.stackroots.append(root_ptr)
- self.gc.collect()
- root_ptr = self.stackroots[0]
- #
- next_ptr = self.malloc(S)
- next_ptr.someInt = 200
- self.write(root_ptr, 'next', next_ptr)
- self.gc.collect()
- next_ptr = root_ptr.next
- #
- # check if everything is as expected
- assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
- assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(next_ptr))
- assert root_ptr.someInt == 100
- assert next_ptr.someInt == 200
- #
- pinned_ptr = self.malloc(S)
- pinned_ptr.someInt = 300
- assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
- self.write(next_ptr, 'next', pinned_ptr)
- self.gc.collect()
- #
- # validate everything is as expected with 3 rounds of GC collecting
- for _ in range(3):
- self.gc.collect()
- assert next_ptr.next == pinned_ptr
- assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
- assert pinned_ptr.someInt == 300
- assert root_ptr.someInt == 100
- assert next_ptr.someInt == 200
- #
- # remove the root from stackroots
- self.stackroots.remove(root_ptr)
- self.gc.minor_collection()
- #
- # the minor collection will still visit 'next_ptr', although
- # 'root_ptr' is not part of the stackroots anymore. This makes
- # sense as 'next_ptr' is removed only in the next major collection
- assert next_ptr.next.someInt == 300
- #
- # now we do a major collection and everything should be gone
- self.gc.collect()
- try:
- pinned_ptr.someInt == 300
- assert False
- except RuntimeError as ex:
- assert "freed" in str(ex)
-
-
- def test_pin_old(self):
- ptr = self.malloc(S)
- ptr.someInt = 100
- self.stackroots.append(ptr)
- self.gc.collect()
- ptr = self.stackroots[0]
- adr = llmemory.cast_ptr_to_adr(ptr)
- assert ptr.someInt == 100
- assert not self.gc.is_in_nursery(adr)
- assert not self.gc.pin(adr)
- # ^^^ should not be possible, struct is already old and won't
- # move.
-
- def test_pin_malloc_pin(self):
- first_ptr = self.malloc(S)
- first_ptr.someInt = 101
- self.stackroots.append(first_ptr)
- assert self.gc.pin(llmemory.cast_ptr_to_adr(first_ptr))
-
- self.gc.collect()
- assert first_ptr.someInt == 101
-
- second_ptr = self.malloc(S)
- second_ptr.someInt = 102
- self.stackroots.append(second_ptr)
- assert self.gc.pin(llmemory.cast_ptr_to_adr(second_ptr))
-
- self.gc.collect()
- assert first_ptr.someInt == 101
- assert second_ptr.someInt == 102
+ # XXX REMOVE THIS COMMENT copied ones:
def pin_shadow_1(self, collect_func):
ptr = self.malloc(S)
@@ -629,35 +640,6 @@
# we did not reset the whole nursery
assert self.gc.nursery_top < self.gc.nursery_real_top
- def test_collect_dead_pinned_objects(self):
- # prepare three objects, where two are stackroots
- ptr_stackroot_1 = self.malloc(S)
- ptr_stackroot_1.someInt = 100
- self.stackroots.append(ptr_stackroot_1)
-
- ptr_not_stackroot = self.malloc(S)
-
- ptr_stackroot_2 = self.malloc(S)
- ptr_stackroot_2.someInt = 100
- self.stackroots.append(ptr_stackroot_2)
-
- # pin all three objects
- assert self.gc.pin(llmemory.cast_ptr_to_adr(ptr_stackroot_1))
- assert self.gc.pin(llmemory.cast_ptr_to_adr(ptr_not_stackroot))
- assert self.gc.pin(llmemory.cast_ptr_to_adr(ptr_stackroot_2))
- assert self.gc.pinned_objects_in_nursery == 3
-
- self.gc.collect()
- # now the one not on the stack should be gone.
- assert self.gc.pinned_objects_in_nursery == 2
- assert ptr_stackroot_1.someInt == 100
- assert ptr_stackroot_2.someInt == 100
- try:
- ptr_not_stackroot.someInt = 100
- assert False
- except RuntimeError as ex:
- assert "freed" in str(ex)
-
def fill_nursery_with_pinned_objects(self):
typeid = self.get_type_id(S)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
@@ -714,3 +696,4 @@
# nursery should be full now, at least no space for another `S`. Next malloc should fail.
py.test.raises(Exception, self.malloc, S)
test_full_pinned_nursery_pin_fail.GC_PARAMS = {'max_number_of_pinned_objects': 50}
+
From noreply at buildbot.pypy.org Tue Aug 12 14:17:10 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 12 Aug 2014 14:17:10 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: additional object
pinning tests. some tests renamed.
Message-ID: <20140812121710.B28681C03AC@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72765:119a5775c08a
Date: 2014-08-12 14:16 +0200
http://bitbucket.org/pypy/pypy/changeset/119a5775c08a/
Log: additional object pinning tests. some tests renamed.
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py
--- a/rpython/memory/gc/test/test_object_pinning.py
+++ b/rpython/memory/gc/test/test_object_pinning.py
@@ -47,6 +47,74 @@
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
from rpython.memory.gc.incminimark import STATE_SCANNING
+ def test_pin_old(self):
+ # scenario: try pinning an old object. This should be not possible and
+ # we want to make sure everything stays as it is.
+ old_ptr = self.malloc(S)
+ old_ptr.someInt = 900
+ self.stackroots.append(old_ptr)
+ assert self.stackroots[0] == old_ptr # test assumption
+ self.gc.collect()
+ old_ptr = self.stackroots[0]
+ # now we try to pin it
+ old_adr = llmemory.cast_ptr_to_adr(old_ptr)
+ assert not self.gc.is_in_nursery(old_adr)
+ assert not self.gc.pin(old_adr)
+ assert self.gc.pinned_objects_in_nursery == 0
+
+
+ def pin_pin_pinned_object_count(self, collect_func):
+ # scenario: pin two objects that are referenced from stackroots. Check
+ # if the pinned objects count is correct, even after another collection
+ pinned1_ptr = self.malloc(S)
+ pinned1_ptr.someInt = 100
+ self.stackroots.append(pinned1_ptr)
+ #
+ pinned2_ptr = self.malloc(S)
+ pinned2_ptr.someInt = 200
+ self.stackroots.append(pinned2_ptr)
+ #
+ assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned1_ptr))
+ assert self.gc.pinned_objects_in_nursery == 1
+ assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned2_ptr))
+ assert self.gc.pinned_objects_in_nursery == 2
+ #
+ collect_func()
+ #
+ assert self.gc.pinned_objects_in_nursery == 2
+
+ def test_pin_pin_pinned_object_count_minor_collection(self):
+ self.pin_pin_pinned_object_count(self.gc.minor_collection)
+
+ def test_pin_pin_pinned_object_count_major_collection(self):
+ self.pin_pin_pinned_object_count(self.gc.collect)
+
+
+ def pin_unpin_pinned_object_count(self, collect_func):
+ # scenario: pin an object and check the pinned object count. Unpin it
+ # and check the count again.
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ self.stackroots.append(pinned_ptr)
+ pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
+ #
+ assert self.gc.pinned_objects_in_nursery == 0
+ assert self.gc.pin(pinned_adr)
+ assert self.gc.pinned_objects_in_nursery == 1
+ collect_func()
+ assert self.gc.pinned_objects_in_nursery == 1
+ self.gc.unpin(pinned_adr)
+ assert self.gc.pinned_objects_in_nursery == 0
+ collect_func()
+ assert self.gc.pinned_objects_in_nursery == 0
+
+ def test_pin_unpin_pinned_object_count_minor_collection(self):
+ self.pin_unpin_pinned_object_count(self.gc.minor_collection)
+
+ def test_pin_unpin_pinned_object_count_major_collection(self):
+ self.pin_unpin_pinned_object_count(self.gc.collect)
+
+
def pinned_obj_in_stackroot(self, collect_func):
# scenario: a pinned object that is part of the stack roots. Check if
# it is not moved
@@ -342,7 +410,7 @@
root_ptr = self.malloc(S)
root_ptr.someInt = 900
self.stackroots.append(root_ptr)
- assert self.stackroots[0] == old_ptr # validate assumption
+ assert self.stackroots[0] == root_ptr # validate assumption
#
pinned_ptr = self.malloc(S)
pinned_ptr.someInt = 100
@@ -364,9 +432,9 @@
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
assert self.gc.is_in_nursery(pinned_adr)
# and as 'root_ptr' object is now old, it should be tracked specially
- should_be_root_ptr = self.gc.old_objects_pointing_to_pinned.pop()
- assert should_be_root_ptr == root_ptr
- self.gc.old_objects_pointing_to_pinned.push(should_be_root_ptr)
+ should_be_root_adr = self.gc.old_objects_pointing_to_pinned.pop()
+ assert should_be_root_adr == llmemory.cast_ptr_to_adr(root_ptr)
+ self.gc.old_objects_pointing_to_pinned.append(should_be_root_adr)
# check that old object still points to the pinned one as expected
assert root_ptr.next == pinned_ptr
@@ -377,8 +445,6 @@
self.pin_referenced_from_young_in_stackroots(self.gc.collect)
- # XXX REMOVE THIS COMMENT copied ones:
-
def pin_shadow_1(self, collect_func):
ptr = self.malloc(S)
adr = llmemory.cast_ptr_to_adr(ptr)
@@ -394,12 +460,13 @@
adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(adr)
- def test_pin_shadow_1_minor(self):
+ def test_pin_shadow_1_minor_collection(self):
self.pin_shadow_1(self.gc.minor_collection)
- def test_pin_shadow_1_full(self):
+ def test_pin_shadow_1_major_collection(self):
self.pin_shadow_1(self.gc.collect)
+
def pin_shadow_2(self, collect_func):
ptr = self.malloc(S)
adr = llmemory.cast_ptr_to_adr(ptr)
@@ -415,12 +482,13 @@
adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(adr)
- def test_pin_shadow_2_minor(self):
+ def test_pin_shadow_2_minor_collection(self):
self.pin_shadow_2(self.gc.minor_collection)
- def test_pin_shadow_2_full(self):
+ def test_pin_shadow_2_major_collection(self):
self.pin_shadow_2(self.gc.collect)
+
def test_pin_nursery_top_scenario1(self):
ptr1 = self.malloc(S)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
@@ -458,6 +526,7 @@
assert self.gc.nursery_free < self.gc.nursery_top
assert self.gc.nursery_top == self.gc.nursery_real_top
+
def test_pin_nursery_top_scenario2(self):
ptr1 = self.malloc(S)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
@@ -497,6 +566,7 @@
assert self.gc.nursery_top < adr3
assert adr3 < self.gc.nursery_real_top
+
def test_pin_nursery_top_scenario3(self):
ptr1 = self.malloc(S)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
@@ -538,6 +608,7 @@
assert self.gc.nursery_top < adr2
assert adr3 < self.gc.nursery_real_top
+
def test_pin_nursery_top_scenario4(self):
ptr1 = self.malloc(S)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
@@ -580,6 +651,7 @@
assert self.gc.nursery_top < adr3
assert adr3 < self.gc.nursery_real_top
+
def test_pin_nursery_top_scenario5(self):
ptr1 = self.malloc(S)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
@@ -640,6 +712,7 @@
# we did not reset the whole nursery
assert self.gc.nursery_top < self.gc.nursery_real_top
+
def fill_nursery_with_pinned_objects(self):
typeid = self.get_type_id(S)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
@@ -654,7 +727,8 @@
def test_full_pinned_nursery_pin_fail(self):
self.fill_nursery_with_pinned_objects()
- # nursery should be full now, at least no space for another `S`. Next malloc should fail.
+ # nursery should be full now, at least no space for another `S`.
+ # Next malloc should fail.
py.test.raises(Exception, self.malloc, S)
def test_full_pinned_nursery_arena_reset(self):
@@ -693,7 +767,9 @@
self.stackroots.append(ptr)
self.gc.pin(adr)
#
- # nursery should be full now, at least no space for another `S`. Next malloc should fail.
+ # nursery should be full now, at least no space for another `S`.
+ # Next malloc should fail.
py.test.raises(Exception, self.malloc, S)
- test_full_pinned_nursery_pin_fail.GC_PARAMS = {'max_number_of_pinned_objects': 50}
+ test_full_pinned_nursery_pin_fail.GC_PARAMS = \
+ {'max_number_of_pinned_objects': 50}
From noreply at buildbot.pypy.org Tue Aug 12 14:54:53 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 14:54:53 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Add a failing test for the
preservation of the shadowstack
Message-ID: <20140812125453.AE45C1C0157@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1296:93f047b33b57
Date: 2014-08-12 14:54 +0200
http://bitbucket.org/pypy/stmgc/changeset/93f047b33b57/
Log: Add a failing test for the preservation of the shadowstack
diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c
--- a/c7/test/test_rewind.c
+++ b/c7/test/test_rewind.c
@@ -220,6 +220,49 @@
/************************************************************/
+typedef struct { char foo; } object_t;
+struct stm_shadowentry_s { object_t *ss; };
+typedef struct {
+ struct stm_shadowentry_s *shadowstack;
+ struct stm_shadowentry_s _inline[99];
+} stm_thread_local_t;
+#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p))
+#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss))
+void stm_register_thread_local(stm_thread_local_t *tl) {
+ tl->shadowstack = tl->_inline;
+}
+void stm_unregister_thread_local(stm_thread_local_t *tl) { }
+static stm_thread_local_t tl;
+
+
+void testTL1(void)
+{
+ object_t *a1, *a2;
+ stm_register_thread_local(&tl);
+
+ rewind_jmp_buf buf;
+ rewind_jmp_enterframe(>hread, &buf);
+
+ a1 = a2 = (object_t *)123456;
+ STM_PUSH_ROOT(tl, a1);
+
+ if (rewind_jmp_setjmp(>hread) == 0) {
+ /* first path */
+ STM_POP_ROOT(tl, a2);
+ assert(a1 == a2);
+ STM_PUSH_ROOT(tl, NULL);
+ rewind_jmp_longjmp(>hread);
+ }
+ /* second path */
+ STM_POP_ROOT(tl, a2);
+ assert(a1 == a2);
+
+ rewind_jmp_leaveframe(>hread, &buf);
+ stm_unregister_thread_local(&tl);
+}
+
+/************************************************************/
+
int rj_malloc_count = 0;
void *rj_malloc(size_t size)
@@ -248,6 +291,7 @@
else if (!strcmp(argv[1], "4")) test4();
else if (!strcmp(argv[1], "5")) test5();
else if (!strcmp(argv[1], "6")) test6();
+ else if (!strcmp(argv[1], "TL1")) testTL1();
else
assert(!"bad argv[1]");
assert(rj_malloc_count == 0);
diff --git a/c7/test/test_rewind.py b/c7/test/test_rewind.py
--- a/c7/test/test_rewind.py
+++ b/c7/test/test_rewind.py
@@ -1,17 +1,17 @@
import os
def run_test(opt):
- err = os.system("clang -g -O%d -Werror -DRJBUF_CUSTOM_MALLOC -I../stm"
- " -o test_rewind_O%d test_rewind.c ../stm/rewind_setjmp.c"
+ err = os.system("clang -g -O%s -Werror -DRJBUF_CUSTOM_MALLOC -I../stm"
+ " -o test_rewind_O%s test_rewind.c ../stm/rewind_setjmp.c"
% (opt, opt))
if err != 0:
raise OSError("clang failed on test_rewind.c")
- for testnum in [1, 2, 3, 4, 5, 6]:
- print '=== O%d: RUNNING TEST %d ===' % (opt, testnum)
- err = os.system("./test_rewind_O%d %d" % (opt, testnum))
+ for testnum in [1, 2, 3, 4, 5, 6, "TL1"]:
+ print '=== O%s: RUNNING TEST %s ===' % (opt, testnum)
+ err = os.system("./test_rewind_O%s %s" % (opt, testnum))
if err != 0:
- raise OSError("'test_rewind_O%d %d' failed" % (opt, testnum))
- os.unlink("./test_rewind_O%d" % (opt,))
+ raise OSError("'test_rewind_O%s %s' failed" % (opt, testnum))
+ os.unlink("./test_rewind_O%s" % (opt,))
def test_O0(): run_test(0)
def test_O1(): run_test(1)
From noreply at buildbot.pypy.org Tue Aug 12 15:58:27 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 15:58:27 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Save and restore slices of the
shadowstack in addition to slices of the
Message-ID: <20140812135827.EC9A21C326A@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1297:438a6f00fadc
Date: 2014-08-12 15:58 +0200
http://bitbucket.org/pypy/stmgc/changeset/438a6f00fadc/
Log: Save and restore slices of the shadowstack in addition to slices of
the C stack.
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -7,7 +7,8 @@
struct _rewind_jmp_moved_s {
struct _rewind_jmp_moved_s *next;
- size_t size;
+ size_t stack_size;
+ size_t shadowstack_size;
};
#define RJM_HEADER sizeof(struct _rewind_jmp_moved_s)
@@ -20,28 +21,41 @@
#endif
-static void copy_stack(rewind_jmp_thread *rjthread, char *base)
+static void copy_stack(rewind_jmp_thread *rjthread, char *base, void *ssbase)
{
+ /* Copy away part of the stack and shadowstack.
+ The stack is copied between 'base' (lower limit, i.e. newest bytes)
+ and 'rjthread->head->frame_base' (upper limit, i.e. oldest bytes).
+ The shadowstack is copied between 'ssbase' (upper limit, newest)
+ and 'rjthread->head->shadowstack_base' (lower limit, oldest).
+ */
assert(rjthread->head != NULL);
char *stop = rjthread->head->frame_base;
- assert(stop > base);
+ assert(stop >= base);
+ void *ssstop = rjthread->head->shadowstack_base;
+ assert(ssstop <= ssbase);
struct _rewind_jmp_moved_s *next = (struct _rewind_jmp_moved_s *)
- rj_malloc(RJM_HEADER + (stop - base));
+ rj_malloc(RJM_HEADER + (stop - base) + (ssbase - ssstop));
assert(next != NULL); /* XXX out of memory */
next->next = rjthread->moved_off;
- next->size = stop - base;
+ next->stack_size = stop - base;
+ next->shadowstack_size = ssbase - ssstop;
memcpy(((char *)next) + RJM_HEADER, base, stop - base);
+ memcpy(((char *)next) + RJM_HEADER + (stop - base), ssstop,
+ ssbase - ssstop);
rjthread->moved_off_base = stop;
+ rjthread->moved_off_ssbase = ssstop;
rjthread->moved_off = next;
}
__attribute__((noinline))
-long rewind_jmp_setjmp(rewind_jmp_thread *rjthread)
+long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss)
{
if (rjthread->moved_off) {
_rewind_jmp_free_stack_slices(rjthread);
}
+ void *volatile ss1 = ss;
rewind_jmp_thread *volatile rjthread1 = rjthread;
int result;
if (__builtin_setjmp(rjthread->jmpbuf) == 0) {
@@ -55,7 +69,7 @@
result = rjthread->repeat_count + 1;
}
rjthread->repeat_count = result;
- copy_stack(rjthread, (char *)&rjthread1);
+ copy_stack(rjthread, (char *)&rjthread1, ss1);
return result;
}
@@ -67,13 +81,20 @@
while (rjthread->moved_off) {
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
char *target = rjthread->moved_off_base;
- target -= p->size;
+ target -= p->stack_size;
if (target < stack_free) {
/* need more stack space! */
do_longjmp(rjthread, alloca(stack_free - target));
}
- memcpy(target, ((char *)p) + RJM_HEADER, p->size);
+ memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size);
+
+ char *sstarget = rjthread->moved_off_ssbase;
+ char *ssend = sstarget + p->shadowstack_size;
+ memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size,
+ p->shadowstack_size);
+
rjthread->moved_off_base = target;
+ rjthread->moved_off_ssbase = ssend;
rjthread->moved_off = p->next;
rj_free(p);
}
@@ -95,7 +116,7 @@
return;
}
assert(rjthread->moved_off_base < (char *)rjthread->head);
- copy_stack(rjthread, rjthread->moved_off_base);
+ copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase);
}
void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread)
diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h
--- a/c7/stm/rewind_setjmp.h
+++ b/c7/stm/rewind_setjmp.h
@@ -41,6 +41,7 @@
typedef struct _rewind_jmp_buf {
char *frame_base;
+ char *shadowstack_base;
struct _rewind_jmp_buf *prev;
} rewind_jmp_buf;
@@ -48,30 +49,36 @@
rewind_jmp_buf *head;
rewind_jmp_buf *initial_head;
char *moved_off_base;
+ char *moved_off_ssbase;
struct _rewind_jmp_moved_s *moved_off;
void *jmpbuf[5];
long repeat_count;
} rewind_jmp_thread;
-#define rewind_jmp_enterframe(rjthread, rjbuf) do { \
- (rjbuf)->frame_base = __builtin_frame_address(0); \
- (rjbuf)->prev = (rjthread)->head; \
- (rjthread)->head = (rjbuf); \
+#define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \
+ (rjbuf)->frame_base = __builtin_frame_address(0); \
+ (rjbuf)->shadowstack_base = (char *)(ss); \
+ (rjbuf)->prev = (rjthread)->head; \
+ (rjthread)->head = (rjbuf); \
} while (0)
-#define rewind_jmp_leaveframe(rjthread, rjbuf) do { \
- (rjthread)->head = (rjbuf)->prev; \
- if ((rjbuf)->frame_base == (rjthread)->moved_off_base) \
- _rewind_jmp_copy_stack_slice(rjthread); \
+#define rewind_jmp_leaveframe(rjthread, rjbuf, ss) do { \
+ assert((rjbuf)->shadowstack_base == (char *)(ss)); \
+ (rjthread)->head = (rjbuf)->prev; \
+ if ((rjbuf)->frame_base == (rjthread)->moved_off_base) { \
+ assert((rjthread)->moved_off_ssbase == (char *)(ss));\
+ _rewind_jmp_copy_stack_slice(rjthread); \
+ } \
} while (0)
-long rewind_jmp_setjmp(rewind_jmp_thread *rjthread);
+long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss);
void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn));
#define rewind_jmp_forget(rjthread) do { \
if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \
(rjthread)->moved_off_base = 0; \
+ (rjthread)->moved_off_ssbase = 0; \
} while (0)
void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *);
diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c
--- a/c7/test/test_rewind.c
+++ b/c7/test/test_rewind.c
@@ -43,10 +43,10 @@
void test1(void)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
test1_x = 0;
- rewind_jmp_setjmp(>hread);
+ rewind_jmp_setjmp(>hread, NULL);
test1_x++;
f1(test1_x);
@@ -59,7 +59,7 @@
rewind_jmp_forget(>hread);
assert(!rewind_jmp_armed(>hread));
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
}
/************************************************************/
@@ -70,22 +70,22 @@
int f2(void)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
test2_x = 0;
- rewind_jmp_setjmp(>hread);
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_setjmp(>hread, NULL);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
return ++test2_x;
}
void test2(void)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
int x = f2();
gevent(x);
if (x < 10)
rewind_jmp_longjmp(>hread);
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
CHECK(expected);
}
@@ -104,12 +104,12 @@
void test3(void)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
int x = f3(50);
gevent(x);
if (x < 10)
rewind_jmp_longjmp(>hread);
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
CHECK(expected);
}
@@ -120,25 +120,25 @@
int f4(int rec)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
int res;
if (rec > 0)
res = f4(rec - 1);
else
res = f2();
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
return res;
}
void test4(void)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
int x = f4(5);
gevent(x);
if (x < 10)
rewind_jmp_longjmp(>hread);
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
CHECK(expected);
}
@@ -148,11 +148,11 @@
void test5(void)
{
struct { int a; rewind_jmp_buf buf; int b; } sbuf;
- rewind_jmp_enterframe(>hread, &sbuf.buf);
+ rewind_jmp_enterframe(>hread, &sbuf.buf, NULL);
sbuf.a = 42;
sbuf.b = -42;
test2_x = 0;
- rewind_jmp_setjmp(>hread);
+ rewind_jmp_setjmp(>hread, NULL);
sbuf.a++;
sbuf.b--;
gevent(sbuf.a);
@@ -163,7 +163,7 @@
}
int expected[] = {43, -43, 43, -43};
CHECK(expected);
- rewind_jmp_leaveframe(>hread, &sbuf.buf);
+ rewind_jmp_leaveframe(>hread, &sbuf.buf, NULL);
}
/************************************************************/
@@ -178,9 +178,9 @@
int a8, int a9, int a10, int a11, int a12, int a13)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
- rewind_jmp_setjmp(>hread);
+ rewind_jmp_setjmp(>hread, NULL);
gevent(a1); gevent(a2); gevent(a3); gevent(a4);
gevent(a5); gevent(a6); gevent(a7); gevent(a8);
gevent(a9); gevent(a10); gevent(a11); gevent(a12);
@@ -201,16 +201,16 @@
foo(&a13);
rewind_jmp_longjmp(>hread);
}
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
}
void test6(void)
{
rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ rewind_jmp_enterframe(>hread, &buf, NULL);
test6_x = 0;
f6(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13);
- rewind_jmp_leaveframe(>hread, &buf);
+ rewind_jmp_leaveframe(>hread, &buf, NULL);
int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
@@ -220,45 +220,64 @@
/************************************************************/
-typedef struct { char foo; } object_t;
-struct stm_shadowentry_s { object_t *ss; };
-typedef struct {
- struct stm_shadowentry_s *shadowstack;
- struct stm_shadowentry_s _inline[99];
-} stm_thread_local_t;
-#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p))
-#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss))
-void stm_register_thread_local(stm_thread_local_t *tl) {
- tl->shadowstack = tl->_inline;
-}
-void stm_unregister_thread_local(stm_thread_local_t *tl) { }
-static stm_thread_local_t tl;
-
+static void *ssarray[99];
void testTL1(void)
{
- object_t *a1, *a2;
- stm_register_thread_local(&tl);
+ void *a4, *a5;
+ rewind_jmp_buf buf;
+ rewind_jmp_enterframe(>hread, &buf, ssarray+5);
- rewind_jmp_buf buf;
- rewind_jmp_enterframe(>hread, &buf);
+ a4 = (void *)444444;
+ a5 = (void *)555555;
+ ssarray[4] = a4;
+ ssarray[5] = a5;
- a1 = a2 = (object_t *)123456;
- STM_PUSH_ROOT(tl, a1);
-
- if (rewind_jmp_setjmp(>hread) == 0) {
+ if (rewind_jmp_setjmp(>hread, ssarray+6) == 0) {
/* first path */
- STM_POP_ROOT(tl, a2);
- assert(a1 == a2);
- STM_PUSH_ROOT(tl, NULL);
+ assert(ssarray[4] == a4);
+ assert(ssarray[5] == a5);
+ ssarray[4] = NULL;
+ ssarray[5] = NULL;
rewind_jmp_longjmp(>hread);
}
/* second path */
- STM_POP_ROOT(tl, a2);
- assert(a1 == a2);
+ assert(ssarray[4] == NULL); /* was not saved */
+ assert(ssarray[5] == a5); /* saved and restored */
- rewind_jmp_leaveframe(>hread, &buf);
- stm_unregister_thread_local(&tl);
+ rewind_jmp_leaveframe(>hread, &buf, ssarray+5);
+}
+
+__attribute__((noinline))
+int gtl2(void)
+{
+ rewind_jmp_buf buf;
+ rewind_jmp_enterframe(>hread, &buf, ssarray+5);
+ ssarray[5] = (void *)555555;
+
+ int result = rewind_jmp_setjmp(>hread, ssarray+6);
+
+ assert(ssarray[4] == (void *)444444);
+ assert(ssarray[5] == (void *)555555);
+ ssarray[5] = NULL;
+
+ rewind_jmp_leaveframe(>hread, &buf, ssarray+5);
+ return result;
+}
+
+void testTL2(void)
+{
+ rewind_jmp_buf buf;
+ rewind_jmp_enterframe(>hread, &buf, ssarray+4);
+
+ ssarray[4] = (void *)444444;
+ int result = gtl2();
+ ssarray[4] = NULL;
+
+ if (result == 0)
+ rewind_jmp_longjmp(>hread);
+
+ rewind_jmp_leaveframe(>hread, &buf, ssarray+4);
}
/************************************************************/
@@ -292,6 +311,7 @@
else if (!strcmp(argv[1], "5")) test5();
else if (!strcmp(argv[1], "6")) test6();
else if (!strcmp(argv[1], "TL1")) testTL1();
+ else if (!strcmp(argv[1], "TL2")) testTL2();
else
assert(!"bad argv[1]");
assert(rj_malloc_count == 0);
diff --git a/c7/test/test_rewind.py b/c7/test/test_rewind.py
--- a/c7/test/test_rewind.py
+++ b/c7/test/test_rewind.py
@@ -6,7 +6,7 @@
% (opt, opt))
if err != 0:
raise OSError("clang failed on test_rewind.c")
- for testnum in [1, 2, 3, 4, 5, 6, "TL1"]:
+ for testnum in [1, 2, 3, 4, 5, 6, "TL1", "TL2"]:
print '=== O%s: RUNNING TEST %s ===' % (opt, testnum)
err = os.system("./test_rewind_O%s %s" % (opt, testnum))
if err != 0:
From noreply at buildbot.pypy.org Tue Aug 12 16:00:44 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 16:00:44 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Add an assert
Message-ID: <20140812140044.542201C326A@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1298:c84f87cc1dad
Date: 2014-08-12 16:00 +0200
http://bitbucket.org/pypy/stmgc/changeset/c84f87cc1dad/
Log: Add an assert
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -67,6 +67,8 @@
rjthread = rjthread1;
rjthread->head = rjthread->initial_head;
result = rjthread->repeat_count + 1;
+ /* check that the shadowstack was correctly restored */
+ assert(rjthread->moved_off_ssbase == ss1);
}
rjthread->repeat_count = result;
copy_stack(rjthread, (char *)&rjthread1, ss1);
From noreply at buildbot.pypy.org Tue Aug 12 16:05:16 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 16:05:16 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Fix
Message-ID: <20140812140516.C533A1C12CC@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1299:99aae29fc053
Date: 2014-08-12 16:05 +0200
http://bitbucket.org/pypy/stmgc/changeset/99aae29fc053/
Log: Fix
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -55,23 +55,26 @@
if (rjthread->moved_off) {
_rewind_jmp_free_stack_slices(rjthread);
}
- void *volatile ss1 = ss;
- rewind_jmp_thread *volatile rjthread1 = rjthread;
+ /* all locals of this function that need to be saved and restored
+ across the setjmp() should be stored inside this structure */
+ struct { void *ss1; rewind_jmp_thread *rjthread1; } volatile saved =
+ { ss, rjthread };
+
int result;
if (__builtin_setjmp(rjthread->jmpbuf) == 0) {
- rjthread = rjthread1;
+ rjthread = saved.rjthread1;
rjthread->initial_head = rjthread->head;
result = 0;
}
else {
- rjthread = rjthread1;
+ rjthread = saved.rjthread1;
rjthread->head = rjthread->initial_head;
result = rjthread->repeat_count + 1;
/* check that the shadowstack was correctly restored */
- assert(rjthread->moved_off_ssbase == ss1);
+ assert(rjthread->moved_off_ssbase == saved.ss1);
}
rjthread->repeat_count = result;
- copy_stack(rjthread, (char *)&rjthread1, ss1);
+ copy_stack(rjthread, (char *)&saved, saved.ss1);
return result;
}
From noreply at buildbot.pypy.org Tue Aug 12 16:43:01 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 16:43:01 +0200 (CEST)
Subject: [pypy-commit] stmgc default: in-progress
Message-ID: <20140812144301.0D1181C0547@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1300:2866cee6ce00
Date: 2014-08-12 16:29 +0200
http://bitbucket.org/pypy/stmgc/changeset/2866cee6ce00/
Log: in-progress
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -393,7 +393,7 @@
#ifdef STM_NO_AUTOMATIC_SETJMP
long repeat_count = 0; /* test/support.py */
#else
- long repeat_count = rewind_jmp_setjmp(&tl->rjthread);
+ long repeat_count = stm_rewind_jmp_setjmp(tl);
#endif
_stm_start_transaction(tl, false);
return repeat_count;
@@ -828,7 +828,7 @@
dprintf(("commit_transaction\n"));
assert(STM_SEGMENT->nursery_end == NURSERY_END);
- rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread);
+ stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
/* if a major collection is required, do it here */
if (is_major_collection_requested()) {
@@ -983,12 +983,12 @@
reset_modified_from_other_segments(segment_num);
_verify_cards_cleared_in_all_lists(pseg);
- /* reset the tl->shadowstack and thread_local_obj to their original
- value before the transaction start */
+ /* reset tl->shadowstack and thread_local_obj to their original
+ value before the transaction start. Also restore the content
+ of the shadowstack here. */
stm_thread_local_t *tl = pseg->pub.running_thread;
- assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction);
- pseg->shadowstack_at_abort = tl->shadowstack;
- tl->shadowstack = pseg->shadowstack_at_start_of_transaction;
+ stm_rewind_jmp_restore_shadowstack(tl);
+ assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction);
tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction;
tl->last_abort__bytes_in_nursery = bytes_in_nursery;
@@ -1063,7 +1063,7 @@
#ifdef STM_NO_AUTOMATIC_SETJMP
_test_run_abort(tl);
#else
- rewind_jmp_longjmp(&tl->rjthread);
+ stm_rewind_jmp_longjmp(tl);
#endif
}
@@ -1078,7 +1078,7 @@
marker_fetch_inev();
wait_for_end_of_inevitable_transaction(NULL);
STM_PSEGMENT->transaction_state = TS_INEVITABLE;
- rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread);
+ stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
clear_callbacks_on_abort();
}
else {
diff --git a/c7/stm/core.h b/c7/stm/core.h
--- a/c7/stm/core.h
+++ b/c7/stm/core.h
@@ -186,7 +186,6 @@
'thread_local_obj' field. */
struct stm_shadowentry_s *shadowstack_at_start_of_transaction;
object_t *threadlocal_at_start_of_transaction;
- struct stm_shadowentry_s *shadowstack_at_abort;
/* Already signalled to commit soon: */
bool signalled_to_commit_soon;
diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c
--- a/c7/stm/forksupport.c
+++ b/c7/stm/forksupport.c
@@ -184,7 +184,7 @@
rewind_jmp_buf rjbuf;
stm_rewind_jmp_enterframe(tl, &rjbuf);
- if (rewind_jmp_setjmp(&tl->rjthread) == 0) {
+ if (stm_rewind_jmp_setjmp(tl) == 0) {
#ifndef NDEBUG
pr->running_pthread = pthread_self();
#endif
@@ -193,7 +193,7 @@
strcpy(pr->marker_self, "fork");
stm_abort_transaction();
}
- rewind_jmp_forget(&tl->rjthread);
+ stm_rewind_jmp_forget(tl);
stm_rewind_jmp_leaveframe(tl, &rjbuf);
}
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -93,13 +93,7 @@
}
memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size);
- char *sstarget = rjthread->moved_off_ssbase;
- char *ssend = sstarget + p->shadowstack_size;
- memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size,
- p->shadowstack_size);
-
rjthread->moved_off_base = target;
- rjthread->moved_off_ssbase = ssend;
rjthread->moved_off = p->next;
rj_free(p);
}
@@ -113,6 +107,22 @@
do_longjmp(rjthread, &_rewind_jmp_marker);
}
+char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread)
+{
+ struct _rewind_jmp_moved_s *p = rjthread->moved_off;
+ char *sstarget = rjthread->moved_off_ssbase;
+
+ while (p) {
+ char *ssend = sstarget + p->shadowstack_size;
+ memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size,
+ p->shadowstack_size);
+ sstarget = ssend;
+ p = p->next;
+ }
+ rjthread->moved_off_ssbase = sstarget;
+ return sstarget;
+}
+
__attribute__((noinline))
void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread)
{
diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h
--- a/c7/stm/rewind_setjmp.h
+++ b/c7/stm/rewind_setjmp.h
@@ -74,6 +74,7 @@
long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss);
void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn));
+char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread);
#define rewind_jmp_forget(rjthread) do { \
if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -331,9 +331,18 @@
function with the interpreter's dispatch loop, you need to declare
a local variable of type 'rewind_jmp_buf' and call these macros. */
#define stm_rewind_jmp_enterframe(tl, rjbuf) \
- rewind_jmp_enterframe(&(tl)->rjthread, rjbuf)
+ rewind_jmp_enterframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack)
#define stm_rewind_jmp_leaveframe(tl, rjbuf) \
- rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf)
+ rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack)
+#define stm_rewind_jmp_setjmp(tl) \
+ rewind_jmp_setjmp(&(tl)->rjthread, (tl)->shadowstack)
+#define stm_rewind_jmp_longjmp(tl) \
+ rewind_jmp_longjmp(&(tl)->rjthread)
+#define stm_rewind_jmp_forget(tl) \
+ rewind_jmp_forget(&(tl)->rjthread)
+#define stm_rewind_jmp_restore_shadowstack(tl) \
+ ((tl)->shadowstack = (struct stm_shadowentry_s *) \
+ rewind_jmp_restore_shadowstack(&(tl)->rjthread))
/* Starting and ending transactions. stm_read(), stm_write() and
stm_allocate() should only be called from within a transaction.
diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c
--- a/c7/test/test_rewind.c
+++ b/c7/test/test_rewind.c
@@ -239,6 +239,7 @@
assert(ssarray[5] == a5);
ssarray[4] = NULL;
ssarray[5] = NULL;
+ rewind_jmp_restore_shadowstack(&gthread);
rewind_jmp_longjmp(&gthread);
}
/* second path */
@@ -274,8 +275,10 @@
int result = gtl2();
ssarray[4] = NULL;
- if (result == 0)
+ if (result == 0) {
+ rewind_jmp_restore_shadowstack(&gthread);
rewind_jmp_longjmp(&gthread);
+ }
rewind_jmp_leaveframe(&gthread, &buf, ssarray+4);
}
From noreply at buildbot.pypy.org Tue Aug 12 16:43:02 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 16:43:02 +0200 (CEST)
Subject: [pypy-commit] stmgc default: In tests,
we don't save and restore the shadowstack correctly.
Message-ID: <20140812144302.3D2381C0547@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1301:e691086e6ef0
Date: 2014-08-12 16:43 +0200
http://bitbucket.org/pypy/stmgc/changeset/e691086e6ef0/
Log: In tests, we don't save and restore the shadowstack correctly.
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -987,8 +987,16 @@
value before the transaction start. Also restore the content
of the shadowstack here. */
stm_thread_local_t *tl = pseg->pub.running_thread;
+#ifdef STM_NO_AUTOMATIC_SETJMP
+ /* In tests, we don't save and restore the shadowstack correctly.
+ Be sure to not change items below shadowstack_at_start_of_transaction.
+ There is no such restrictions in non-Python-based tests. */
+ assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction);
+ tl->shadowstack = pseg->shadowstack_at_start_of_transaction;
+#else
stm_rewind_jmp_restore_shadowstack(tl);
assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction);
+#endif
tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction;
tl->last_abort__bytes_in_nursery = bytes_in_nursery;
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -340,8 +340,8 @@
rewind_jmp_longjmp(&(tl)->rjthread)
#define stm_rewind_jmp_forget(tl) \
rewind_jmp_forget(&(tl)->rjthread)
-#define stm_rewind_jmp_restore_shadowstack(tl) \
- ((tl)->shadowstack = (struct stm_shadowentry_s *) \
+#define stm_rewind_jmp_restore_shadowstack(tl) \
+ ((tl)->shadowstack = (struct stm_shadowentry_s *) \
rewind_jmp_restore_shadowstack(&(tl)->rjthread))
/* Starting and ending transactions. stm_read(), stm_write() and
diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py
--- a/c7/test/test_basic.py
+++ b/c7/test/test_basic.py
@@ -427,6 +427,19 @@
self.abort_transaction()
py.test.raises(EmptyStack, self.pop_root)
+ def test_abort_restores_shadowstack_inv(self):
+ py.test.skip("the logic to save/restore the shadowstack doesn't "
+ "work in these tests")
+ self.push_root(ffi.cast("object_t *", 1234))
+ self.start_transaction()
+ p = self.pop_root()
+ assert p == ffi.cast("object_t *", 1234)
+ self.push_root(ffi.cast("object_t *", 5678))
+ self.pop_root()
+ self.abort_transaction()
+ p = self.pop_root()
+ assert p == ffi.cast("object_t *", 1234)
+
def test_check_content_after_commit(self):
self.start_transaction()
lp1 = stm_allocate(16)
From noreply at buildbot.pypy.org Tue Aug 12 16:49:00 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 16:49:00 +0200 (CEST)
Subject: [pypy-commit] pypy.org extradoc: Rephrase
Message-ID: <20140812144900.1E85A1C0793@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: extradoc
Changeset: r524:a3ef8f99cc06
Date: 2014-08-12 16:49 +0200
http://bitbucket.org/pypy/pypy.org/changeset/a3ef8f99cc06/
Log: Rephrase
diff --git a/index.html b/index.html
--- a/index.html
+++ b/index.html
@@ -51,8 +51,8 @@
- Speed: thanks to its Just-in-Time compiler, Python programs
often run faster on PyPy. (What is a JIT compiler?)
-- Memory usage: large, memory-hungry Python programs might end up
-taking less space than they do in CPython.
+- Memory usage: memory-hungry Python programs (many hundreds of
+MBs and above) might end up taking less space than they do in CPython.
- Compatibility: PyPy is highly compatible with existing python code.
It supports cffi and can run popular python libraries like twisted
and django.
diff --git a/source/index.txt b/source/index.txt
--- a/source/index.txt
+++ b/source/index.txt
@@ -9,8 +9,8 @@
* **Speed:** thanks to its Just-in-Time compiler, Python programs
often run `faster`_ on PyPy. `(What is a JIT compiler?)`_
- * **Memory usage:** large, memory-hungry Python programs might end up
- taking `less space`_ than they do in CPython.
+ * **Memory usage:** memory-hungry Python programs (many hundreds of
+ MBs and above) might end up taking `less space`_ than they do in CPython.
* **Compatibility:** PyPy is `highly compatible`_ with existing python code.
It supports `cffi`_ and can run popular python libraries like `twisted`_
From noreply at buildbot.pypy.org Tue Aug 12 16:51:10 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 16:51:10 +0200 (CEST)
Subject: [pypy-commit] pypy.org extradoc: Englishify?
Message-ID: <20140812145110.BD3C71C03AC@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: extradoc
Changeset: r525:26176c314bdd
Date: 2014-08-12 16:51 +0200
http://bitbucket.org/pypy/pypy.org/changeset/26176c314bdd/
Log: Englishify?
diff --git a/index.html b/index.html
--- a/index.html
+++ b/index.html
@@ -51,8 +51,8 @@
- Speed: thanks to its Just-in-Time compiler, Python programs
often run faster on PyPy. (What is a JIT compiler?)
-- Memory usage: memory-hungry Python programs (many hundreds of
-MBs and above) might end up taking less space than they do in CPython.
+- Memory usage: memory-hungry Python programs (several hundreds of
+MBs or more) might end up taking less space than they do in CPython.
- Compatibility: PyPy is highly compatible with existing python code.
It supports cffi and can run popular python libraries like twisted
and django.
diff --git a/source/index.txt b/source/index.txt
--- a/source/index.txt
+++ b/source/index.txt
@@ -9,8 +9,8 @@
* **Speed:** thanks to its Just-in-Time compiler, Python programs
often run `faster`_ on PyPy. `(What is a JIT compiler?)`_
- * **Memory usage:** memory-hungry Python programs (many hundreds of
- MBs and above) might end up taking `less space`_ than they do in CPython.
+ * **Memory usage:** memory-hungry Python programs (several hundreds of
+ MBs or more) might end up taking `less space`_ than they do in CPython.
* **Compatibility:** PyPy is `highly compatible`_ with existing python code.
It supports `cffi`_ and can run popular python libraries like `twisted`_
From noreply at buildbot.pypy.org Tue Aug 12 17:04:23 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 17:04:23 +0200 (CEST)
Subject: [pypy-commit] stmgc default: debugging...
Message-ID: <20140812150423.BBA681C0793@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1302:b1ce5fe3d056
Date: 2014-08-12 17:04 +0200
http://bitbucket.org/pypy/stmgc/changeset/b1ce5fe3d056/
Log: debugging...
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -994,6 +994,9 @@
assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction);
tl->shadowstack = pseg->shadowstack_at_start_of_transaction;
#else
+ /* NB. careful, this function might be called more than once to
+ abort a given segment. Make sure that
+ stm_rewind_jmp_restore_shadowstack() is idempotent. */
stm_rewind_jmp_restore_shadowstack(tl);
assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction);
#endif
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -70,8 +70,6 @@
rjthread = saved.rjthread1;
rjthread->head = rjthread->initial_head;
result = rjthread->repeat_count + 1;
- /* check that the shadowstack was correctly restored */
- assert(rjthread->moved_off_ssbase == saved.ss1);
}
rjthread->repeat_count = result;
copy_stack(rjthread, (char *)&saved, saved.ss1);
@@ -119,7 +117,6 @@
sstarget = ssend;
p = p->next;
}
- rjthread->moved_off_ssbase = sstarget;
return sstarget;
}
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -340,9 +340,11 @@
rewind_jmp_longjmp(&(tl)->rjthread)
#define stm_rewind_jmp_forget(tl) \
rewind_jmp_forget(&(tl)->rjthread)
-#define stm_rewind_jmp_restore_shadowstack(tl) \
- ((tl)->shadowstack = (struct stm_shadowentry_s *) \
- rewind_jmp_restore_shadowstack(&(tl)->rjthread))
+#define stm_rewind_jmp_restore_shadowstack(tl) do { \
+ assert(rewind_jmp_armed(&(tl)->rjthread)); \
+ (tl)->shadowstack = (struct stm_shadowentry_s *) \
+ rewind_jmp_restore_shadowstack(&(tl)->rjthread); \
+} while (0)
/* Starting and ending transactions. stm_read(), stm_write() and
stm_allocate() should only be called from within a transaction.
From noreply at buildbot.pypy.org Tue Aug 12 17:08:20 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 17:08:20 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Bug fix: now the logic will complain
if the shadow stack is not
Message-ID: <20140812150820.6DF621C0EE9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1303:1815f493a1c5
Date: 2014-08-12 17:08 +0200
http://bitbucket.org/pypy/stmgc/changeset/1815f493a1c5/
Log: Bug fix: now the logic will complain if the shadow stack is not
correctly balanced.
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c
--- a/c7/demo/demo2.c
+++ b/c7/demo/demo2.c
@@ -208,6 +208,11 @@
printf("setup ok\n");
}
+void teardown_list(void)
+{
+ STM_POP_ROOT_RET(stm_thread_local);
+}
+
static sem_t done;
@@ -303,6 +308,7 @@
final_check();
+ teardown_list();
stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
unregister_thread_local();
From noreply at buildbot.pypy.org Tue Aug 12 17:50:05 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 12 Aug 2014 17:50:05 +0200 (CEST)
Subject: [pypy-commit] stmgc default: clean up shadowstack in demo_random
for leaveframe
Message-ID: <20140812155005.DA8081C0EE9@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r1304:8e8c594bbad1
Date: 2014-08-12 17:51 +0200
http://bitbucket.org/pypy/stmgc/changeset/8e8c594bbad1/
Log: clean up shadowstack in demo_random for leaveframe
diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c
--- a/c7/demo/demo_random.c
+++ b/c7/demo/demo_random.c
@@ -393,6 +393,16 @@
}
}
}
+ push_roots();
+ stm_commit_transaction();
+
+ /* even out the shadow stack before leaveframe: */
+ stm_start_inevitable_transaction(&stm_thread_local);
+ while (td.num_roots > 0) {
+ td.num_roots--;
+ objptr_t t;
+ STM_POP_ROOT(stm_thread_local, t);
+ }
stm_commit_transaction();
stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
From noreply at buildbot.pypy.org Tue Aug 12 18:08:41 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 18:08:41 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Kill STM_STACK_MARKER_{NEW,
OLD} and use 'moved_off_ssbase' instead.
Message-ID: <20140812160841.111791C0EE9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1305:50d22a5baf26
Date: 2014-08-12 18:08 +0200
http://bitbucket.org/pypy/stmgc/changeset/50d22a5baf26/
Log: Kill STM_STACK_MARKER_{NEW,OLD} and use 'moved_off_ssbase' instead.
diff --git a/c7/stm/marker.c b/c7/stm/marker.c
--- a/c7/stm/marker.c
+++ b/c7/stm/marker.c
@@ -18,10 +18,9 @@
struct stm_shadowentry_s *current = tl->shadowstack - 1;
struct stm_shadowentry_s *base = tl->shadowstack_base;
- /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is
- a convenient stopper for the loop below but which shouldn't
- be returned. */
- assert(base->ss == (object_t *)STM_STACK_MARKER_OLD);
+ /* The shadowstack_base contains -1, which is a convenient stopper for
+ the loop below but which shouldn't be returned. */
+ assert(base->ss == (object_t *)-1);
while (!(((uintptr_t)current->ss) & 1)) {
current--;
diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c
--- a/c7/stm/nursery.c
+++ b/c7/stm/nursery.c
@@ -156,27 +156,22 @@
{
stm_thread_local_t *tl = STM_SEGMENT->running_thread;
struct stm_shadowentry_s *current = tl->shadowstack;
- struct stm_shadowentry_s *base = tl->shadowstack_base;
- while (1) {
+ struct stm_shadowentry_s *finalbase = tl->shadowstack_base;
+ struct stm_shadowentry_s *ssbase;
+ ssbase = (struct stm_shadowentry_s *)tl->rjthread.moved_off_ssbase;
+ if (ssbase == NULL)
+ ssbase = finalbase;
+ else
+ assert(finalbase <= ssbase && ssbase <= current);
+
+ while (current > ssbase) {
--current;
- OPT_ASSERT(current >= base);
-
uintptr_t x = (uintptr_t)current->ss;
if ((x & 3) == 0) {
/* the stack entry is a regular pointer (possibly NULL) */
minor_trace_if_young(&current->ss);
}
- else if (x == STM_STACK_MARKER_NEW) {
- /* the marker was not already seen: mark it as seen,
- but continue looking more deeply in the shadowstack */
- current->ss = (object_t *)STM_STACK_MARKER_OLD;
- }
- else if (x == STM_STACK_MARKER_OLD) {
- /* the marker was already seen: we can stop the
- root stack tracing at this point */
- break;
- }
else {
/* it is an odd-valued marker, ignore */
}
diff --git a/c7/stm/setup.c b/c7/stm/setup.c
--- a/c7/stm/setup.c
+++ b/c7/stm/setup.c
@@ -201,13 +201,13 @@
struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start;
tl->shadowstack = s;
tl->shadowstack_base = s;
- STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD);
+ STM_PUSH_ROOT(*tl, -1);
}
static void _done_shadow_stack(stm_thread_local_t *tl)
{
assert(tl->shadowstack > tl->shadowstack_base);
- assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD);
+ assert(tl->shadowstack_base->ss == (object_t *)-1);
char *start = (char *)tl->shadowstack_base;
_shadowstack_trap_page(start, PROT_READ | PROT_WRITE);
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -313,8 +313,6 @@
#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p))
#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss))
#define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss)
-#define STM_STACK_MARKER_NEW (-41)
-#define STM_STACK_MARKER_OLD (-43)
/* Every thread needs to have a corresponding stm_thread_local_t
diff --git a/c7/test/support.py b/c7/test/support.py
--- a/c7/test/support.py
+++ b/c7/test/support.py
@@ -12,8 +12,6 @@
#define _STM_FAST_ALLOC ...
#define _STM_GCFLAG_WRITE_BARRIER ...
#define _STM_CARD_SIZE ...
-#define STM_STACK_MARKER_NEW ...
-#define STM_STACK_MARKER_OLD ...
struct stm_shadowentry_s {
object_t *ss;
@@ -622,7 +620,7 @@
def push_root_no_gc(self):
"Pushes an invalid object, to crash in case the GC is called"
- self.push_root(ffi.cast("object_t *", -1))
+ self.push_root(ffi.cast("object_t *", 8))
def check_char_everywhere(self, obj, expected_content, offset=HDR):
for i in range(len(self.tls)):
diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py
--- a/c7/test/test_gcpage.py
+++ b/c7/test/test_gcpage.py
@@ -234,7 +234,7 @@
p1 = stm_allocate(600)
stm_set_char(p1, 'o')
self.push_root(p1)
- self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_NEW))
+ self.push_root(ffi.cast("object_t *", 123))
p2 = stm_allocate(600)
stm_set_char(p2, 't')
self.push_root(p2)
@@ -243,7 +243,7 @@
#
p2 = self.pop_root()
m = self.pop_root()
- assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD)
+ assert m == ffi.cast("object_t *", 123)
p1 = self.pop_root()
assert stm_get_char(p1) == 'o'
assert stm_get_char(p2) == 't'
diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py
--- a/c7/test/test_nursery.py
+++ b/c7/test/test_nursery.py
@@ -203,7 +203,7 @@
p1 = stm_allocate(600)
stm_set_char(p1, 'o')
self.push_root(p1)
- self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_NEW))
+ self.push_root(ffi.cast("object_t *", 123))
p2 = stm_allocate(600)
stm_set_char(p2, 't')
self.push_root(p2)
@@ -212,12 +212,13 @@
#
p2 = self.pop_root()
m = self.pop_root()
- assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD)
+ assert m == ffi.cast("object_t *", 123)
p1 = self.pop_root()
assert stm_get_char(p1) == 'o'
assert stm_get_char(p2) == 't'
def test_marker_2(self):
+ py.test.skip("testing this requires working shadowstack saving logic")
self.start_transaction()
p1 = stm_allocate(600)
stm_set_char(p1, 'o')
From noreply at buildbot.pypy.org Tue Aug 12 18:15:17 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 12 Aug 2014 18:15:17 +0200 (CEST)
Subject: [pypy-commit] stmgc default: add demo_random2 that includes
returning from frames in normal transactions
Message-ID: <20140812161517.D3D921C0157@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r1306:d29906f937fc
Date: 2014-08-12 18:16 +0200
http://bitbucket.org/pypy/stmgc/changeset/d29906f937fc/
Log: add demo_random2 that includes returning from frames in normal
transactions
diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c
new file mode 100644
--- /dev/null
+++ b/c7/demo/demo_random2.c
@@ -0,0 +1,532 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "stmgc.h"
+
+#define NUMTHREADS 3
+#define STEPS_PER_THREAD 500
+#define THREAD_STARTS 1000 // how many restarts of threads
+#define PREBUILT_ROOTS 3
+#define FORKS 3
+
+#define ACTIVE_ROOTS_SET_SIZE 100 // max num of roots created/alive in one transaction
+
+
+// SUPPORT
+struct node_s;
+typedef TLPREFIX struct node_s node_t;
+typedef node_t* nodeptr_t;
+typedef object_t* objptr_t;
+int num_forked_children = 0;
+
+struct node_s {
+ struct object_s hdr;
+ int sig;
+ long my_size;
+ long my_id;
+ long my_hash;
+ nodeptr_t next;
+};
+
+#define SIGNATURE 0x01234567
+
+
+static sem_t done;
+__thread stm_thread_local_t stm_thread_local;
+__thread void *thread_may_fork;
+
+// global and per-thread-data
+time_t default_seed;
+objptr_t prebuilt_roots[PREBUILT_ROOTS];
+
+struct thread_data {
+ unsigned int thread_seed;
+ int steps_left;
+ objptr_t active_roots_set[ACTIVE_ROOTS_SET_SIZE];
+ int active_roots_num;
+ long roots_on_ss;
+ long roots_on_ss_at_tr_start;
+};
+__thread struct thread_data td;
+
+struct thread_data *_get_td(void)
+{
+ return &td; /* for gdb */
+}
+
+
+ssize_t stmcb_size_rounded_up(struct object_s *ob)
+{
+ return ((struct node_s*)ob)->my_size;
+}
+
+void stmcb_trace(struct object_s *obj, void visit(object_t **))
+{
+ struct node_s *n;
+ n = (struct node_s*)obj;
+
+ /* and the same value at the end: */
+ /* note, ->next may be the same as last_next */
+ nodeptr_t *last_next = (nodeptr_t*)((char*)n + n->my_size - sizeof(void*));
+
+ assert(n->next == *last_next);
+
+ visit((object_t **)&n->next);
+ visit((object_t **)last_next);
+
+ assert(n->next == *last_next);
+}
+
+void stmcb_commit_soon() {}
+
+void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
+ uintptr_t start, uintptr_t stop) {
+ abort();
+}
+void stmcb_get_card_base_itemsize(struct object_s *obj,
+ uintptr_t offset_itemsize[2]) {
+ abort();
+}
+
+int get_rand(int max)
+{
+ if (max == 0)
+ return 0;
+ return (int)(rand_r(&td.thread_seed) % (unsigned int)max);
+}
+
+objptr_t get_random_root()
+{
+ /* get some root from shadowstack or active_root_set or prebuilt_roots */
+ int num = get_rand(3);
+ intptr_t ss_size = td.roots_on_ss;
+ if (num == 0 && ss_size > 0) {
+ num = get_rand(ss_size);
+ /* XXX: impl detail: there is already a "-1" on the SS -> +1 */
+ objptr_t r = (objptr_t)stm_thread_local.shadowstack_base[num+1].ss;
+ assert((((uintptr_t)r) & 3) == 0);
+ }
+
+ if (num == 1 && td.active_roots_num > 0) {
+ num = get_rand(td.active_roots_num);
+ return td.active_roots_set[num];
+ } else {
+ num = get_rand(PREBUILT_ROOTS);
+ return prebuilt_roots[num];
+ }
+}
+
+
+long push_roots()
+{
+ int i;
+ long to_push = td.active_roots_num;
+ for (i = to_push - 1; i >= 0; i--) {
+ STM_PUSH_ROOT(stm_thread_local, td.active_roots_set[i]);
+ td.roots_on_ss++;
+ td.active_roots_num--;
+ }
+ return to_push;
+}
+
+void pop_roots(long to_pop)
+{
+ int i;
+ for (i = 0; i < to_pop; i++) {
+ STM_POP_ROOT(stm_thread_local, td.active_roots_set[i]);
+ td.roots_on_ss--;
+ td.active_roots_num++;
+ assert(td.active_roots_num < ACTIVE_ROOTS_SET_SIZE);
+ }
+}
+
+void del_root(int idx)
+{
+ int i;
+
+ for (i = idx; i < td.active_roots_num - 1; i++)
+ td.active_roots_set[i] = td.active_roots_set[i + 1];
+ td.active_roots_num--;
+}
+
+void add_root(objptr_t r)
+{
+ if (r && td.active_roots_num < ACTIVE_ROOTS_SET_SIZE) {
+ td.active_roots_set[td.active_roots_num++] = r;
+ }
+}
+
+
+void read_barrier(objptr_t p)
+{
+ if (p != NULL) {
+ stm_read(p);
+ }
+}
+
+void write_barrier(objptr_t p)
+{
+ if (p != NULL) {
+ stm_write(p);
+ }
+}
+
+void set_next(objptr_t p, objptr_t v)
+{
+ if (p != NULL) {
+ nodeptr_t n = (nodeptr_t)p;
+
+ /* and the same value at the end: */
+ nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*));
+ assert(n->next == *last_next);
+ n->next = (nodeptr_t)v;
+ *last_next = (nodeptr_t)v;
+ }
+}
+
+nodeptr_t get_next(objptr_t p)
+{
+ nodeptr_t n = (nodeptr_t)p;
+
+ /* and the same value at the end: */
+ nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*));
+ OPT_ASSERT(n->next == *last_next);
+
+ return n->next;
+}
+
+
+objptr_t simple_events(objptr_t p, objptr_t _r)
+{
+ int k = get_rand(10);
+
+ switch (k) {
+ case 0: // remove a root
+ if (td.active_roots_num) {
+ del_root(get_rand(td.active_roots_num));
+ }
+ break;
+ case 1: // add 'p' to roots
+ add_root(p);
+ break;
+ case 2: // set 'p' to point to a root
+ if (_r)
+ p = _r;
+ break;
+ case 3: // allocate fresh 'p'
+ ;
+ long pushed = push_roots();
+ size_t sizes[4] = {sizeof(struct node_s),
+ sizeof(struct node_s) + (get_rand(100000) & ~15),
+ sizeof(struct node_s) + 4096,
+ sizeof(struct node_s) + 4096*70};
+ size_t size = sizes[get_rand(4)];
+ p = stm_allocate(size);
+ ((nodeptr_t)p)->sig = SIGNATURE;
+ ((nodeptr_t)p)->my_size = size;
+ ((nodeptr_t)p)->my_id = 0;
+ ((nodeptr_t)p)->my_hash = 0;
+ pop_roots(pushed);
+ break;
+ case 4: // read and validate 'p'
+ read_barrier(p);
+ break;
+ case 5: // only do a stm_write_barrier
+ write_barrier(p);
+ break;
+ case 6: // follow p->next
+ if (p) {
+ read_barrier(p);
+ p = (objptr_t)(get_next(p));
+ }
+ break;
+ case 7: // set 'p' as *next in one of the roots
+ write_barrier(_r);
+ set_next(_r, p);
+ break;
+ case 8: // id checking
+ if (p) {
+ nodeptr_t n = (nodeptr_t)p;
+ if (n->my_id == 0) {
+ write_barrier(p);
+ n->my_id = stm_id(p);
+ }
+ else {
+ read_barrier(p);
+ assert(n->my_id == stm_id(p));
+ }
+ }
+ break;
+ case 9:
+ if (p) {
+ nodeptr_t n = (nodeptr_t)p;
+ if (n->my_hash == 0) {
+ write_barrier(p);
+ n->my_hash = stm_identityhash(p);
+ }
+ else {
+ read_barrier(p);
+ assert(n->my_hash == stm_identityhash(p));
+ }
+ }
+ break;
+ }
+ return p;
+}
+
+
+void frame_loop();
+objptr_t do_step(objptr_t p)
+{
+ objptr_t _r;
+ int k;
+
+ _r = get_random_root();
+ k = get_rand(12);
+
+ if (k < 10) {
+ p = simple_events(p, _r);
+ } else if (get_rand(20) == 1) {
+ long pushed = push_roots();
+ stm_commit_transaction();
+ td.roots_on_ss_at_tr_start = td.roots_on_ss;
+
+ /* if (get_rand(100) < 98) { */
+ /* stm_start_transaction(&stm_thread_local); */
+ /* } else */{
+ stm_start_inevitable_transaction(&stm_thread_local);
+ }
+ td.roots_on_ss = td.roots_on_ss_at_tr_start;
+ td.active_roots_num = 0;
+ pop_roots(pushed);
+ return NULL;
+ } else if (get_rand(10) == 1) {
+ fprintf(stderr, "R");
+
+ long pushed = push_roots();
+ /* leaving our frame */
+ frame_loop();
+ /* back in our frame */
+ pop_roots(pushed);
+
+ fprintf(stderr, "r");
+ return NULL;
+ } else if (get_rand(20) == 1) {
+ long pushed = push_roots();
+ stm_become_inevitable(&stm_thread_local, "please");
+ assert(stm_is_inevitable());
+ pop_roots(pushed);
+ return NULL;
+ } else if (get_rand(200) == 1) {
+ return (objptr_t)-1; // possibly fork
+ } else if (get_rand(240) == 1) {
+ long pushed = push_roots();
+ stm_become_globally_unique_transaction(&stm_thread_local, "really");
+ fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num);
+ pop_roots(pushed);
+ return NULL;
+ }
+ return p;
+}
+
+void frame_loop()
+{
+ objptr_t p = NULL;
+ rewind_jmp_buf rjbuf;
+
+ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
+ volatile long roots_on_ss = td.roots_on_ss;
+ /* "interpreter main loop": this is one "application-frame" */
+ while (td.steps_left-->0 && get_rand(10) != 0) {
+ if (td.steps_left % 8 == 0)
+ fprintf(stdout, "#");
+
+ assert(p == NULL || ((nodeptr_t)p)->sig == SIGNATURE);
+
+ p = do_step(p);
+
+ if (p == (objptr_t)-1) {
+ p = NULL;
+
+ /* long call_fork = (thread_may_fork != NULL && *(long *)thread_may_fork); */
+ /* if (call_fork) { /\* common case *\/ */
+ /* push_roots(); */
+ /* /\* run a fork() inside the transaction *\/ */
+ /* printf("========== FORK =========\n"); */
+ /* *(long*)thread_may_fork = 0; */
+ /* pid_t child = fork(); */
+ /* printf("=== in process %d thread %lx, fork() returned %d\n", */
+ /* (int)getpid(), (long)pthread_self(), (int)child); */
+ /* if (child == -1) { */
+ /* fprintf(stderr, "fork() error: %m\n"); */
+ /* abort(); */
+ /* } */
+ /* if (child != 0) */
+ /* num_forked_children++; */
+ /* else */
+ /* num_forked_children = 0; */
+
+ /* pop_roots(); */
+ /* } */
+ }
+ }
+ assert(roots_on_ss == td.roots_on_ss);
+
+ stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
+}
+
+
+
+void setup_thread()
+{
+ memset(&td, 0, sizeof(struct thread_data));
+
+ /* stupid check because gdb shows garbage
+ in td.roots: */
+ int i;
+ for (i = 0; i < ACTIVE_ROOTS_SET_SIZE; i++)
+ assert(td.active_roots_set[i] == NULL);
+
+ td.thread_seed = default_seed++;
+ td.steps_left = STEPS_PER_THREAD;
+ td.active_roots_num = 0;
+ td.roots_on_ss = 0;
+ td.roots_on_ss_at_tr_start = 0;
+}
+
+
+
+void *demo_random(void *arg)
+{
+ int status;
+ rewind_jmp_buf rjbuf;
+ stm_register_thread_local(&stm_thread_local);
+ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
+
+ setup_thread();
+
+ td.roots_on_ss_at_tr_start = 0;
+ stm_start_transaction(&stm_thread_local);
+ td.roots_on_ss = td.roots_on_ss_at_tr_start;
+ td.active_roots_num = 0;
+
+ thread_may_fork = arg;
+ while (td.steps_left-->0) {
+ frame_loop();
+ }
+
+ stm_commit_transaction();
+
+ stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
+ stm_unregister_thread_local(&stm_thread_local);
+
+ status = sem_post(&done); assert(status == 0);
+ return NULL;
+}
+
+void newthread(void*(*func)(void*), void *arg)
+{
+ pthread_t th;
+ int status = pthread_create(&th, NULL, func, arg);
+ if (status != 0)
+ abort();
+ pthread_detach(th);
+ printf("started new thread\n");
+}
+
+
+void setup_globals()
+{
+ int i;
+
+ struct node_s prebuilt_template = {
+ .sig = SIGNATURE,
+ .my_size = sizeof(struct node_s),
+ .my_id = 0,
+ .my_hash = 0,
+ .next = NULL
+ };
+
+ stm_start_inevitable_transaction(&stm_thread_local);
+ for (i = 0; i < PREBUILT_ROOTS; i++) {
+ void* new_templ = malloc(sizeof(struct node_s));
+ memcpy(new_templ, &prebuilt_template, sizeof(struct node_s));
+ prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)(long)new_templ);
+
+ if (i % 2 == 0) {
+ int hash = i + 5;
+ stm_set_prebuilt_identityhash(prebuilt_roots[i],
+ hash);
+ ((nodeptr_t)prebuilt_roots[i])->my_hash = hash;
+ }
+ }
+ stm_commit_transaction();
+}
+
+int main(void)
+{
+ int i, status;
+ rewind_jmp_buf rjbuf;
+
+ /* pick a random seed from the time in seconds.
+ A bit pointless for now... because the interleaving of the
+ threads is really random. */
+ default_seed = time(NULL);
+ printf("running with seed=%lld\n", (long long)default_seed);
+
+ status = sem_init(&done, 0, 0);
+ assert(status == 0);
+
+
+ stm_setup();
+ stm_register_thread_local(&stm_thread_local);
+ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
+
+ setup_globals();
+
+ int thread_starts = NUMTHREADS * THREAD_STARTS;
+ for (i = 0; i < NUMTHREADS; i++) {
+ newthread(demo_random, NULL);
+ thread_starts--;
+ }
+
+ for (i=0; i < NUMTHREADS * THREAD_STARTS; i++) {
+ status = sem_wait(&done);
+ assert(status == 0);
+ printf("thread finished\n");
+ if (thread_starts) {
+ long forkbase = NUMTHREADS * THREAD_STARTS / (FORKS + 1);
+ long _fork = (thread_starts % forkbase) == 0;
+ thread_starts--;
+ newthread(demo_random, &_fork);
+ }
+ }
+
+ for (i = 0; i < num_forked_children; i++) {
+ pid_t child = wait(&status);
+ if (child == -1)
+ perror("wait");
+ printf("From %d: child %d terminated with exit status %d\n",
+ (int)getpid(), (int)child, status);
+ if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
+ ;
+ else {
+ printf("*** error from the child ***\n");
+ return 1;
+ }
+ }
+
+ printf("Test OK!\n");
+
+ stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
+ stm_unregister_thread_local(&stm_thread_local);
+ stm_teardown();
+
+ return 0;
+}
From noreply at buildbot.pypy.org Tue Aug 12 18:21:13 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 12 Aug 2014 18:21:13 +0200 (CEST)
Subject: [pypy-commit] stmgc default: fix
Message-ID: <20140812162113.D3E3B1C0157@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r1307:d70bdefc4757
Date: 2014-08-12 18:22 +0200
http://bitbucket.org/pypy/stmgc/changeset/d70bdefc4757/
Log: fix
diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c
--- a/c7/demo/demo_random2.c
+++ b/c7/demo/demo_random2.c
@@ -135,14 +135,15 @@
return to_push;
}
+void add_root(objptr_t r);
void pop_roots(long to_pop)
{
int i;
for (i = 0; i < to_pop; i++) {
- STM_POP_ROOT(stm_thread_local, td.active_roots_set[i]);
+ objptr_t t;
+ STM_POP_ROOT(stm_thread_local, t);
+ add_root(t);
td.roots_on_ss--;
- td.active_roots_num++;
- assert(td.active_roots_num < ACTIVE_ROOTS_SET_SIZE);
}
}
@@ -297,9 +298,9 @@
stm_commit_transaction();
td.roots_on_ss_at_tr_start = td.roots_on_ss;
- /* if (get_rand(100) < 98) { */
- /* stm_start_transaction(&stm_thread_local); */
- /* } else */{
+ if (get_rand(100) < 98) {
+ stm_start_transaction(&stm_thread_local);
+ } else{
stm_start_inevitable_transaction(&stm_thread_local);
}
td.roots_on_ss = td.roots_on_ss_at_tr_start;
From noreply at buildbot.pypy.org Tue Aug 12 18:35:12 2014
From: noreply at buildbot.pypy.org (waedt)
Date: Tue, 12 Aug 2014 18:35:12 +0200 (CEST)
Subject: [pypy-commit] pypy utf8-unicode2: Fix translation
Message-ID: <20140812163512.F32C81C0157@cobra.cs.uni-duesseldorf.de>
Author: Tyler Wade
Branch: utf8-unicode2
Changeset: r72766:50441033e543
Date: 2014-08-12 11:34 -0500
http://bitbucket.org/pypy/pypy/changeset/50441033e543/
Log: Fix translation
diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py
--- a/pypy/module/_io/interp_stringio.py
+++ b/pypy/module/_io/interp_stringio.py
@@ -27,11 +27,10 @@
newline = None
else:
newline = space.unicode_w(w_newline)
+ newline = newline.bytes
- if (newline is not None and len(newline) != 0 and
- utf8.NE(newline, Utf8Str('\n')) and
- utf8.NE(newline, Utf8Str('\r\n')) and
- utf8.NE(newline, Utf8Str('\r'))):
+ if (newline and newline != '\n' and newline != '\r\n' and
+ newline != '\r'):
             # Not using oefmt() because I don't know how to use it
# with unicode
raise OperationError(space.w_ValueError,
@@ -39,12 +38,12 @@
space.wrap("illegal newline value: %s"), space.wrap(newline)
)
)
+
if newline is not None:
self.readnl = newline
- self.readuniversal = newline is None or len(newline) == 0
+ self.readuniversal = not newline
self.readtranslate = newline is None
- if (newline is not None and len(newline) > 0 and
- utf8ord(newline) == ord("\r")):
+ if newline and newline[0] == '\r':
self.writenl = newline
if self.readuniversal:
self.w_decoder = space.call_function(
@@ -146,7 +145,8 @@
if self.writenl:
w_decoded = space.call_method(
- w_decoded, "replace", space.wrap("\n"), space.wrap(self.writenl)
+ w_decoded, "replace", space.wrap("\n"),
+ space.wrap(Utf8Str(self.writenl))
)
string = space.unicode_w(w_decoded)
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -372,17 +372,14 @@
newline = None
else:
newline = space.unicode_w(w_newline)
+ # newline is guaranteed to be either empty or ascii
+ newline = newline.bytes
- if (newline is not None and len(newline) > 0 and
- not (utf8.EQ(newline, Utf8Str('\n')) or
- utf8.EQ(newline, Utf8Str('\r\n')) or
- utf8.EQ(newline, Utf8Str('\r')))):
+ if (newline and newline != '\n' and newline != '\r\n' and
+ newline != '\r'):
r = space.str_w(space.repr(w_newline))
raise OperationError(space.w_ValueError, space.wrap(
"illegal newline value: %s" % (r,)))
- elif newline is not None:
- # newline is guaranteed to be either empty or ascii
- newline = newline.bytes
self.line_buffering = line_buffering
From noreply at buildbot.pypy.org Tue Aug 12 18:38:50 2014
From: noreply at buildbot.pypy.org (waedt)
Date: Tue, 12 Aug 2014 18:38:50 +0200 (CEST)
Subject: [pypy-commit] pypy utf8-unicode2: Copy failing lib-python test
Message-ID: <20140812163850.90FA91C0157@cobra.cs.uni-duesseldorf.de>
Author: Tyler Wade
Branch: utf8-unicode2
Changeset: r72767:90afe25b63d5
Date: 2014-08-12 11:38 -0500
http://bitbucket.org/pypy/pypy/changeset/90afe25b63d5/
Log: Copy failing lib-python test
diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py
--- a/pypy/module/_io/test/test_textio.py
+++ b/pypy/module/_io/test/test_textio.py
@@ -1,5 +1,5 @@
class AppTestTextIO:
- spaceconfig = dict(usemodules=['_io', '_locale'])
+ spaceconfig = dict(usemodules=['_io', '_locale', 'thread', 'time', 'signal'])
def test_constructor(self):
import _io
@@ -281,6 +281,58 @@
t.read() == u'a'
+ def test_interrupted_write(self):
+ import _io
+ import os
+ import threading
+ import sys
+ import signal
+ import errno
+
+ item = u'xy'
+ bytes = 'xy'
+
+ signal.signal(signal.SIGALRM, lambda x, y: 1 // 0)
+
+ read_results = []
+ def _read():
+ s = os.read(r, 1)
+ read_results.append(s)
+ t = threading.Thread(target=_read)
+ t.daemon = True
+ r, w = os.pipe()
+ try:
+ wio = _io.open(w, mode='w', encoding="ascii")
+ t.start()
+ signal.alarm(1)
+ # Fill the pipe enough that the write will be blocking.
+ # It will be interrupted by the timer armed above. Since the
+ # other thread has read one byte, the low-level write will
+ # return with a successful (partial) result rather than an EINTR.
+ # The buffered IO layer must check for pending signal
+ # handlers, which in this case will invoke alarm_interrupt().
+
+ raises(ZeroDivisionError, wio.write, item * (4194305 // len(item) + 1))
+
+ t.join()
+ # We got one byte, get another one and check that it isn't a
+ # repeat of the first one.
+ read_results.append(os.read(r, 1))
+
+ assert read_results == [bytes[0:1], bytes[1:2]]
+ finally:
+ os.close(w)
+ os.close(r)
+ # This is deliberate. If we didn't close the file descriptor
+ # before closing wio, wio would try to flush its internal
+ # buffer, and block again.
+ try:
+ wio.close()
+ except IOError as e:
+ if e.errno != errno.EBADF:
+ raise
+
+
class AppTestIncrementalNewlineDecoder:
def test_newline_decoder(self):
import _io
From noreply at buildbot.pypy.org Tue Aug 12 18:43:29 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 18:43:29 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Add another unit test (run by
test/test_demo.py) for major GC tracking
Message-ID: <20140812164329.A6D1F1C0157@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1308:38dbf997b57b
Date: 2014-08-12 18:43 +0200
http://bitbucket.org/pypy/stmgc/changeset/38dbf997b57b/
Log: Add another unit test (run by test/test_demo.py) for major GC
tracking the parts of the shadowstacks that have been moved away
diff --git a/c7/demo/test_shadowstack.c b/c7/demo/test_shadowstack.c
new file mode 100644
--- /dev/null
+++ b/c7/demo/test_shadowstack.c
@@ -0,0 +1,68 @@
+#include <stdio.h>
+#include <assert.h>
+#include "stmgc.h"
+
+stm_thread_local_t stm_thread_local;
+
+typedef TLPREFIX struct node_s node_t;
+
+struct node_s {
+ struct object_s hdr;
+ long value;
+};
+
+ssize_t stmcb_size_rounded_up(struct object_s *ob)
+{
+ return sizeof(struct node_s);
+}
+void stmcb_trace(struct object_s *obj, void visit(object_t **))
+{
+}
+void stmcb_get_card_base_itemsize(struct object_s *obj,
+ uintptr_t offset_itemsize[2])
+{
+ abort();
+}
+void stmcb_trace_cards(struct object_s *obj, void visit(object_t **),
+ uintptr_t start, uintptr_t stop)
+{
+ abort();
+}
+void stmcb_commit_soon() {}
+
+
+int main(void)
+{
+ rewind_jmp_buf rjbuf;
+
+ stm_setup();
+ stm_register_thread_local(&stm_thread_local);
+ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
+
+ stm_start_transaction(&stm_thread_local);
+ node_t *node = (node_t *)stm_allocate(sizeof(struct node_s));
+ node->value = 129821;
+ STM_PUSH_ROOT(stm_thread_local, node);
+ stm_commit_transaction();
+
+ /* now in a new transaction, pop the node off the shadowstack, but
+ then do a major collection. It should still be found by the
+ tracing logic. */
+ stm_start_transaction(&stm_thread_local);
+ STM_POP_ROOT(stm_thread_local, node);
+ assert(node->value == 129821);
+ STM_PUSH_ROOT(stm_thread_local, NULL);
+ stm_collect(9);
+
+ node_t *node2 = (node_t *)stm_allocate(sizeof(struct node_s));
+ assert(node2 != node);
+ assert(node->value == 129821);
+
+ STM_PUSH_ROOT(stm_thread_local, node2);
+ stm_collect(0);
+ STM_POP_ROOT(stm_thread_local, node2);
+ assert(node2 != node);
+ assert(node->value == 129821);
+
+ return 0;
+}
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c
--- a/c7/stm/gcpage.c
+++ b/c7/stm/gcpage.c
@@ -363,6 +363,16 @@
mark_trace(obj, segment_base);
}
+static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size)
+{
+ const struct stm_shadowentry_s *p, *end;
+ p = (const struct stm_shadowentry_s *)slice;
+ end = (const struct stm_shadowentry_s *)(slice + size);
+ for (; p < end; p++)
+ mark_visit_object(p->ss, stm_object_pages);
+ return NULL;
+}
+
static void mark_visit_from_roots(void)
{
if (testing_prebuilt_objs != NULL) {
@@ -386,6 +396,7 @@
mark_visit_object(current->ss, segment_base);
}
mark_visit_object(tl->thread_local_obj, segment_base);
+ stm_rewind_jmp_enum_shadowstack(tl, mark_visit_objects_from_ss);
tl = tl->next;
} while (tl != stm_all_thread_locals);
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -105,21 +105,28 @@
do_longjmp(rjthread, &_rewind_jmp_marker);
}
-char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread)
+
+char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread,
+ void *callback(void *, const void *, size_t))
{
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
char *sstarget = rjthread->moved_off_ssbase;
while (p) {
char *ssend = sstarget + p->shadowstack_size;
- memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size,
- p->shadowstack_size);
+ callback(sstarget, ((char *)p) + RJM_HEADER + p->stack_size,
+ p->shadowstack_size);
sstarget = ssend;
p = p->next;
}
return sstarget;
}
+char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread)
+{
+ return rewind_jmp_enum_shadowstack(rjthread, memcpy);
+}
+
__attribute__((noinline))
void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread)
{
diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h
--- a/c7/stm/rewind_setjmp.h
+++ b/c7/stm/rewind_setjmp.h
@@ -1,6 +1,8 @@
#ifndef _REWIND_SETJMP_H_
#define _REWIND_SETJMP_H_
+#include <stddef.h>
+
/************************************************************
: : ^^^^^
@@ -75,6 +77,8 @@
long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss);
void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn));
char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread);
+char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread,
+ void *callback(void *, const void *, size_t));
#define rewind_jmp_forget(rjthread) do { \
if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -343,6 +343,8 @@
(tl)->shadowstack = (struct stm_shadowentry_s *) \
rewind_jmp_restore_shadowstack(&(tl)->rjthread); \
} while (0)
+#define stm_rewind_jmp_enum_shadowstack(tl, callback) \
+ rewind_jmp_enum_shadowstack(&(tl)->rjthread, callback)
/* Starting and ending transactions. stm_read(), stm_write() and
stm_allocate() should only be called from within a transaction.
diff --git a/c7/test/test_demo.py b/c7/test/test_demo.py
--- a/c7/test/test_demo.py
+++ b/c7/test/test_demo.py
@@ -13,6 +13,8 @@
self._do("make -C ../demo %s" % target)
self._do("../demo/%s 2> /dev/null" % target)
+ def test_shadowstack(self): self.make_and_run("debug-test_shadowstack")
+
def test_demo2_debug(self): self.make_and_run("debug-demo2")
def test_demo2_build(self): self.make_and_run("build-demo2")
def test_demo2_release(self): self.make_and_run("release-demo2")
From noreply at buildbot.pypy.org Tue Aug 12 18:49:22 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 12 Aug 2014 18:49:22 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Test and fix
Message-ID: <20140812164922.1CA051C0157@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1309:1cb240cc60a2
Date: 2014-08-12 18:49 +0200
http://bitbucket.org/pypy/stmgc/changeset/1cb240cc60a2/
Log: Test and fix
diff --git a/c7/demo/test_shadowstack.c b/c7/demo/test_shadowstack.c
--- a/c7/demo/test_shadowstack.c
+++ b/c7/demo/test_shadowstack.c
@@ -43,12 +43,14 @@
node_t *node = (node_t *)stm_allocate(sizeof(struct node_s));
node->value = 129821;
STM_PUSH_ROOT(stm_thread_local, node);
+ STM_PUSH_ROOT(stm_thread_local, 333); /* odd value */
stm_commit_transaction();
/* now in a new transaction, pop the node off the shadowstack, but
then do a major collection. It should still be found by the
tracing logic. */
stm_start_transaction(&stm_thread_local);
+ STM_POP_ROOT_RET(stm_thread_local);
STM_POP_ROOT(stm_thread_local, node);
assert(node->value == 129821);
STM_PUSH_ROOT(stm_thread_local, NULL);
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c
--- a/c7/stm/gcpage.c
+++ b/c7/stm/gcpage.c
@@ -369,7 +369,8 @@
p = (const struct stm_shadowentry_s *)slice;
end = (const struct stm_shadowentry_s *)(slice + size);
for (; p < end; p++)
- mark_visit_object(p->ss, stm_object_pages);
+ if ((((uintptr_t)p->ss) & 3) == 0)
+ mark_visit_object(p->ss, stm_object_pages);
return NULL;
}
From noreply at buildbot.pypy.org Tue Aug 12 18:54:25 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 12 Aug 2014 18:54:25 +0200 (CEST)
Subject: [pypy-commit] stmgc default: add forking again to demo_random2
Message-ID: <20140812165425.ADE311C0157@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r1310:3127164e93bf
Date: 2014-08-12 18:55 +0200
http://bitbucket.org/pypy/stmgc/changeset/3127164e93bf/
Log: add forking again to demo_random2
diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c
--- a/c7/demo/demo_random2.c
+++ b/c7/demo/demo_random2.c
@@ -300,7 +300,7 @@
if (get_rand(100) < 98) {
stm_start_transaction(&stm_thread_local);
- } else{
+ } else {
stm_start_inevitable_transaction(&stm_thread_local);
}
td.roots_on_ss = td.roots_on_ss_at_tr_start;
@@ -308,15 +308,11 @@
pop_roots(pushed);
return NULL;
} else if (get_rand(10) == 1) {
- fprintf(stderr, "R");
-
long pushed = push_roots();
/* leaving our frame */
frame_loop();
/* back in our frame */
pop_roots(pushed);
-
- fprintf(stderr, "r");
return NULL;
} else if (get_rand(20) == 1) {
long pushed = push_roots();
@@ -324,9 +320,9 @@
assert(stm_is_inevitable());
pop_roots(pushed);
return NULL;
- } else if (get_rand(200) == 1) {
+ } else if (get_rand(20) == 1) {
return (objptr_t)-1; // possibly fork
- } else if (get_rand(240) == 1) {
+ } else if (get_rand(20) == 1) {
long pushed = push_roots();
stm_become_globally_unique_transaction(&stm_thread_local, "really");
fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num);
@@ -355,26 +351,26 @@
if (p == (objptr_t)-1) {
p = NULL;
- /* long call_fork = (thread_may_fork != NULL && *(long *)thread_may_fork); */
- /* if (call_fork) { /\* common case *\/ */
- /* push_roots(); */
- /* /\* run a fork() inside the transaction *\/ */
- /* printf("========== FORK =========\n"); */
- /* *(long*)thread_may_fork = 0; */
- /* pid_t child = fork(); */
- /* printf("=== in process %d thread %lx, fork() returned %d\n", */
- /* (int)getpid(), (long)pthread_self(), (int)child); */
- /* if (child == -1) { */
- /* fprintf(stderr, "fork() error: %m\n"); */
- /* abort(); */
- /* } */
- /* if (child != 0) */
- /* num_forked_children++; */
- /* else */
- /* num_forked_children = 0; */
+ long call_fork = (thread_may_fork != NULL && *(long *)thread_may_fork);
+ if (call_fork) { /* common case */
+ long pushed = push_roots();
+ /* run a fork() inside the transaction */
+ printf("========== FORK =========\n");
+ *(long*)thread_may_fork = 0;
+ pid_t child = fork();
+ printf("=== in process %d thread %lx, fork() returned %d\n",
+ (int)getpid(), (long)pthread_self(), (int)child);
+ if (child == -1) {
+ fprintf(stderr, "fork() error: %m\n");
+ abort();
+ }
+ if (child != 0)
+ num_forked_children++;
+ else
+ num_forked_children = 0;
- /* pop_roots(); */
- /* } */
+ pop_roots(pushed);
+ }
}
}
assert(roots_on_ss == td.roots_on_ss);
From noreply at buildbot.pypy.org Tue Aug 12 20:54:36 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Tue, 12 Aug 2014 20:54:36 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: make test_debugmallocstats cpython
only
Message-ID: <20140812185436.1CA5B1C0157@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes2
Changeset: r72768:ceebddacd8c1
Date: 2014-08-12 20:41 +0200
http://bitbucket.org/pypy/pypy/changeset/ceebddacd8c1/
Log: make test_debugmallocstats cpython only
diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py
--- a/lib-python/3/test/test_sys.py
+++ b/lib-python/3/test/test_sys.py
@@ -588,6 +588,7 @@
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
+ @test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
From noreply at buildbot.pypy.org Tue Aug 12 20:54:37 2014
From: noreply at buildbot.pypy.org (fijal)
Date: Tue, 12 Aug 2014 20:54:37 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2
(pull request #266)
Message-ID: <20140812185437.5E98F1C0157@cobra.cs.uni-duesseldorf.de>
Author: Maciej Fijalkowski
Branch: py3.3
Changeset: r72769:8d2fd0582587
Date: 2014-08-12 20:53 +0200
http://bitbucket.org/pypy/pypy/changeset/8d2fd0582587/
Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #266)
make test_debugmallocstats cpython only
diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py
--- a/lib-python/3/test/test_sys.py
+++ b/lib-python/3/test/test_sys.py
@@ -588,6 +588,7 @@
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
+ @test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
From noreply at buildbot.pypy.org Wed Aug 13 00:01:12 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Wed, 13 Aug 2014 00:01:12 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: backed out changeset ca3b82260c3a,
signatures are still relevant
Message-ID: <20140812220112.129D71C0793@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72770:86f97db12e19
Date: 2014-08-09 21:53 +0300
http://bitbucket.org/pypy/pypy/changeset/86f97db12e19/
Log: backed out changeset ca3b82260c3a, signatures are still relevant
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -139,7 +139,7 @@
out_flat = out_array.flat
for i in range(in_array.size):
out_flat[i] = in_flat[i] * 2
- def double_times2(in_array, out_array):
+ def double_times2(space, __args__):
assert in_array.dtype == float
in_flat = in_array.flat
out_flat = out_array.flat
@@ -147,6 +147,7 @@
out_flat[i] = in_flat[i] * 2
from numpy import frompyfunc, dtype, arange
ufunc = frompyfunc([int_times2, double_times2], 1, 1,
+ signature='()->()',
dtypes=[dtype(int), dtype(int),
dtype(float), dtype(float)
]
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -482,20 +482,22 @@
class W_UfuncGeneric(W_Ufunc):
'''
- Handle a number of python functions, each with a dtypes.
- The dtypes can specify the input, output args for the function.
- When called, the actual function used will be resolved by examining
- the input arg's dtypes.
+ Handle a number of python functions, each with a signature and dtypes.
+ The signature can specify how to create the inner loop, i.e.
+ (i,j),(j,k)->(i,k) for a dot-like matrix multiplication, and the dtypes
+ can specify the input, output args for the function. When called, the actual
+ function used will be resolved by examining the input arg's dtypes.
If dtypes == 'match', only one argument is provided and the output dtypes
will match the input dtype (not cpython numpy compatible)
'''
_immutable_fields_ = ["funcs", "dtypes", "data"]
- def __init__(self, space, funcs, name, identity, nin, nout, dtypes, match_dtypes=False):
- # XXX make sure funcs, dtypes, nin, nout are consistent
+ def __init__(self, space, funcs, name, identity, nin, nout, dtypes, signature, match_dtypes=False):
+ # XXX make sure funcs, signature, dtypes, nin, nout are consistent
- # These don't matter, we use the dtypes for determining output dtype
+ # These don't matter, we use the signature and dtypes for determining
+ # output dtype
promote_to_largest = promote_to_float = promote_bools = False
allow_bool = allow_complex = True
int_only = complex_to_float = False
@@ -512,6 +514,7 @@
raise oefmt(space.w_ValueError,
"generic ufunc with %d functions, %d arguments, but %d dtypes",
len(funcs), self.nargs, len(dtypes))
+ self.signature = signature
def reduce(self, space, w_obj, w_axis, keepdims=False, out=None, dtype=None,
cumulative=False):
@@ -542,30 +545,27 @@
new_shape = inargs[0].get_shape()
assert isinstance(outargs[0], W_NDimArray)
res_dtype = outargs[0].get_dtype()
- if not self.match_dtypes:
+ # XXX handle inner-loop indexing
+ sign_parts = self.signature.split('->')
+ if len(sign_parts) == 2 and sign_parts[0].strip() == '()' \
+ and sign_parts[1].strip() == '()':
+
+ arglist = space.newlist(inargs + outargs)
func = self.funcs[index]
space.call_function(func, *(inargs + outargs))
if len(outargs) < 2:
return outargs[0]
return outargs
- # XXX TODO handle more complicated signatures,
- # for now, assume (i) -> (i)
if len(outargs) < 2:
return loop.call_many_to_one(space, new_shape, self.funcs[index],
res_dtype, inargs, outargs[0])
return loop.call_many_to_many(space, new_shape, self.funcs[index],
res_dtype, inargs, outargs)
- def type_resolver(self, space, inargs, outargs):
+ def type_resolver(self, space, index, outargs):
# Find a match for the inargs.dtype in self.dtypes, like
# linear_search_type_resolver in numy ufunc_type_resolutions.c
- for i in range(0, len(self.dtypes), self.nargs):
- if inargs[0].get_dtype() == self.dtypes[i]:
- break
- else:
- raise oefmt(space.w_TypeError,
- 'input dtype %r did not match any known dtypes', inargs[0].get_dtype())
- return i / self.nargs
+ return 0
def alloc_outargs(self, space, index, inargs, outargs):
# Any None outarg should be allocated here
@@ -911,12 +911,12 @@
def get(space):
return space.fromcache(UfuncState)
-@unwrap_spec(nin=int, nout=int, w_identity=WrappedDefault(None),
+@unwrap_spec(nin=int, nout=int, signature=str, w_identity=WrappedDefault(None),
name=str, doc=str)
-def frompyfunc(space, w_func, nin, nout, w_dtypes=None,
+def frompyfunc(space, w_func, nin, nout, w_dtypes=None, signature='',
w_identity=None, name='', doc=''):
''' frompyfunc(func, nin, nout) #cpython numpy compatible
- frompyfunc(func, nin, nout, dtypes=None,
+ frompyfunc(func, nin, nout, dtypes=None, signature='',
identity=None, name='', doc='')
Takes an arbitrary Python function and returns a ufunc.
@@ -934,6 +934,9 @@
The number of arrays returned by `func`.
dtypes: None or [dtype, ...] of the input, output args for each function,
or 'match' to force output to exactly match input dtype
+ signature*: str, default=''
+ The mapping of input args to output args, defining the
+ inner-loop indexing
identity*: None (default) or int
For reduce-type ufuncs, the default value
name: str, default=''
@@ -948,7 +951,7 @@
Notes
-----
- If the signature and dtypes are both missing, the returned ufunc always
+ If the signature and out_dtype are both missing, the returned ufunc always
     returns PyObject arrays (cpython numpy compatibility).
Examples
@@ -976,7 +979,7 @@
raise oefmt(space.w_TypeError, 'func must be callable')
func = [w_func]
match_dtypes = False
- if space.is_none(w_dtypes):
+ if space.is_none(w_dtypes) and not signature:
raise oefmt(space.w_NotImplementedError,
'object dtype requested but not implemented')
elif (space.isinstance_w(w_dtypes, space.w_tuple) or
@@ -988,7 +991,9 @@
else:
dtypes = [None]*len(_dtypes)
for i in range(len(dtypes)):
+ print 'decoding',_dtypes[i]
dtypes[i] = descriptor.decode_w_dtype(space, _dtypes[i])
+ print 'got',dtypes[i]
else:
raise oefmt(space.w_ValueError,
'dtypes must be None or a list of dtypes')
@@ -999,7 +1004,7 @@
identity = \
descriptor.get_dtype_cache(space).w_longdtype.box(w_identity)
- w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes,
+ w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, signature,
match_dtypes=match_dtypes)
if doc:
w_ret.w_doc = space.wrap(doc)
From noreply at buildbot.pypy.org Wed Aug 13 00:01:13 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Wed, 13 Aug 2014 00:01:13 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: make untranslated tests pass
Message-ID: <20140812220113.6AFB51C0793@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72771:c12de969c24c
Date: 2014-08-09 22:54 +0300
http://bitbucket.org/pypy/pypy/changeset/c12de969c24c/
Log: make untranslated tests pass
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -112,10 +112,11 @@
assert 'object' in str(e)
# Use pypy specific extension for out_dtype
adder_ufunc0 = frompyfunc(adder, 2, 1, dtypes=['match'])
- adder_ufunc1 = frompyfunc([adder, adder], 2, 1, dtypes=['match'])
- int_func22 = frompyfunc([int, int], 2, 2, signature='()->()',
+ adder_ufunc1 = frompyfunc([adder, adder], 2, 1,
+ dtypes=[int, int, int, float, float, float])
+ int_func22 = frompyfunc([int, int], 2, 2, signature='(i)->(i)',
dtypes=['match'])
- int_func12 = frompyfunc([int, int], 1, 2, signature='()->()',
+ int_func12 = frompyfunc([int], 1, 2, signature='(i)->(i)',
dtypes=['match'])
retype = dtype(int)
assert isinstance(adder_ufunc1, ufunc)
@@ -139,7 +140,7 @@
out_flat = out_array.flat
for i in range(in_array.size):
out_flat[i] = in_flat[i] * 2
- def double_times2(space, __args__):
+ def double_times2(in_array, out_array):
assert in_array.dtype == float
in_flat = in_array.flat
out_flat = out_array.flat
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -562,10 +562,19 @@
return loop.call_many_to_many(space, new_shape, self.funcs[index],
res_dtype, inargs, outargs)
- def type_resolver(self, space, index, outargs):
- # Find a match for the inargs.dtype in self.dtypes, like
- # linear_search_type_resolver in numy ufunc_type_resolutions.c
- return 0
+ def type_resolver(self, space, inargs, outargs):
+ # Find a match for the inargs.dtype in self.dtypes, like
+        # linear_search_type_resolver in numpy ufunc_type_resolutions.c
+ for i in range(0, len(self.dtypes), self.nargs):
+ if inargs[0].get_dtype() == self.dtypes[i]:
+ break
+ else:
+ if len(self.funcs) < 2:
+ return 0
+ raise oefmt(space.w_TypeError,
+ 'input dtype %s did not match any known dtypes',
+ str(inargs[0].get_dtype()))
+ return i / self.nargs
def alloc_outargs(self, space, index, inargs, outargs):
# Any None outarg should be allocated here
@@ -991,9 +1000,7 @@
else:
dtypes = [None]*len(_dtypes)
for i in range(len(dtypes)):
- print 'decoding',_dtypes[i]
dtypes[i] = descriptor.decode_w_dtype(space, _dtypes[i])
- print 'got',dtypes[i]
else:
raise oefmt(space.w_ValueError,
'dtypes must be None or a list of dtypes')
From noreply at buildbot.pypy.org Wed Aug 13 00:01:14 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Wed, 13 Aug 2014 00:01:14 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: add asserts till translation passes
Message-ID: <20140812220114.B47E31C0793@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72772:904129afbc30
Date: 2014-08-10 00:17 +0300
http://bitbucket.org/pypy/pypy/changeset/904129afbc30/
Log: add asserts till translation passes
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -94,8 +94,9 @@
in_iters = [None] * nin
in_states = [None] * nin
for i in range(nin):
- assert isinstance(in_args[i], W_NDimArray)
- in_iter, in_state = in_args[i].create_iter(shape)
+ in_i = in_args[i]
+ assert isinstance(in_i, W_NDimArray)
+ in_iter, in_state = in_i.create_iter(shape)
in_iters[i] = in_iter
in_states[i] = in_state
shapelen = len(shape)
@@ -130,13 +131,15 @@
out_iters = [None] * nout
out_states = [None] * nout
for i in range(nin):
- assert isinstance(in_args[i], W_NDimArray)
- in_iter, in_state = in_args[i].create_iter(shape)
+ in_i = in_args[i]
+ assert isinstance(in_i, W_NDimArray)
+ in_iter, in_state = in_i.create_iter(shape)
in_iters[i] = in_iter
in_states[i] = in_state
for i in range(nout):
- assert isinstance(out_args[i], W_NDimArray)
- out_iter, out_state = out_args[i].create_iter(shape)
+ out_i = in_args[i]
+ assert isinstance(out_i, W_NDimArray)
+ out_iter, out_state = out_i.create_iter(shape)
out_iters[i] = out_iter
out_states[i] = out_state
shapelen = len(shape)
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -541,10 +541,13 @@
'output arg %d must be an array, not %s', i+self.nin, str(args_w[i+self.nin]))
outargs[i] = out
index = self.type_resolver(space, inargs, outargs)
- self.alloc_outargs(space, index, inargs, outargs)
- new_shape = inargs[0].get_shape()
- assert isinstance(outargs[0], W_NDimArray)
- res_dtype = outargs[0].get_dtype()
+ outargs = self.alloc_outargs(space, index, inargs, outargs)
+ inargs0 = inargs[0]
+ outargs0 = outargs[0]
+ assert isinstance(inargs0, W_NDimArray)
+ assert isinstance(outargs0, W_NDimArray)
+ new_shape = inargs0.get_shape()
+ res_dtype = outargs0.get_dtype()
# XXX handle inner-loop indexing
sign_parts = self.signature.split('->')
if len(sign_parts) == 2 and sign_parts[0].strip() == '()' \
@@ -552,10 +555,11 @@
arglist = space.newlist(inargs + outargs)
func = self.funcs[index]
- space.call_function(func, *(inargs + outargs))
+ arglist = space.newlist(inargs + outargs)
+ space.call_args(func, Arguments.frompacked(space, arglist))
if len(outargs) < 2:
- return outargs[0]
- return outargs
+ return outargs0
+ return space.newtuple(outargs)
if len(outargs) < 2:
return loop.call_many_to_one(space, new_shape, self.funcs[index],
res_dtype, inargs, outargs[0])
@@ -563,27 +567,34 @@
res_dtype, inargs, outargs)
def type_resolver(self, space, inargs, outargs):
- # Find a match for the inargs.dtype in self.dtypes, like
- # linear_search_type_resolver in numy ufunc_type_resolutions.c
+ # Find a match for the inargs.dtype in self.dtypes, like
+        # linear_search_type_resolver in numpy ufunc_type_resolutions.c
+ inargs0 = inargs[0]
+ assert isinstance(inargs0, W_NDimArray)
for i in range(0, len(self.dtypes), self.nargs):
- if inargs[0].get_dtype() == self.dtypes[i]:
+ if inargs0.get_dtype() == self.dtypes[i]:
break
else:
if len(self.funcs) < 2:
return 0
raise oefmt(space.w_TypeError,
'input dtype %s did not match any known dtypes',
- str(inargs[0].get_dtype()))
+ str(inargs0.get_dtype()))
return i / self.nargs
def alloc_outargs(self, space, index, inargs, outargs):
# Any None outarg should be allocated here
- temp_shape = inargs[0].get_shape() # XXX wrong!!!
- dtype = inargs[0].get_dtype() # XXX wrong!!!
- order = inargs[0].get_order()
+ inargs0 = inargs[0]
+ assert isinstance(inargs0, W_NDimArray)
+ temp_shape = inargs0.get_shape() # XXX wrong!!!
+ dtype = inargs0.get_dtype() # XXX wrong!!!
+ order = inargs0.get_order()
for i in range(len(outargs)):
if outargs[i] is None:
outargs[i] = W_NDimArray.from_shape(space, temp_shape, dtype, order)
+ for i in range(len(outargs)):
+ assert isinstance(outargs[i], W_NDimArray)
+ return outargs
def prep_call(self, space, index, inargs, outargs):
# Use the index and signature to determine
From noreply at buildbot.pypy.org Wed Aug 13 00:01:16 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Wed, 13 Aug 2014 00:01:16 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: start to properly wrap raw ufunc for
frompyfunc
Message-ID: <20140812220116.0A7CC1C0793@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72773:d706b1f3f1eb
Date: 2014-08-12 14:01 +0300
http://bitbucket.org/pypy/pypy/changeset/d706b1f3f1eb/
Log: start to properly wrap raw ufunc for frompyfunc
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -13,6 +13,9 @@
from pypy.module.micronumpy.concrete import ConcreteArray
from pypy.module.micronumpy import ufuncs
from rpython.rlib.rawstorage import RAW_STORAGE_PTR
+from pypy.interpreter.typedef import TypeDef
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.gateway import interp2app
NPY_C_CONTIGUOUS = 0x0001
NPY_F_CONTIGUOUS = 0x0002
@@ -254,6 +257,29 @@
order=order, owning=owning, w_subtype=w_subtype)
npy_intpp = rffi.LONGP
+class W_GenericUFuncCaller(W_Root):
+ def __init__(self, func):
+ self.func = func
+
+ def descr_call(self, space, __args__):
+ args_w, kwds_w = __args__.unpack()
+ datap = rffi.CFixedArray(rffi.CCHARP, len(args_w))
+ dim_p = rffi.CFixedArray(npy_intpp, len(args_w))
+ stepp = rffi.CFixedArray(npy_intpp, len(args_w))
+ data = rffi.VOIDP
+ for i in len(args_w):
+ arg_i = args[i]
+ assert isinstance(arg_i, W_NDimArray)
+ datap[i] = cffi.cast(rffi.CCHARP, args.implementation.storage)
+ #This assumes we iterate over the last dimension?
+ dim_p[i] = arg_i.get_shape()[0]
+ stepp[i] = arg_i.get_strides()[0]
+ space.call_args(self.func, datap, dim_p, stepp, data)
+
+W_GenericUFuncCaller.typedef = TypeDef("hiddenclass",
+ __call__ = interp2app(W_GenericUFuncCaller.descr_call),
+)
+
GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp,
rffi.VOIDP], rffi.VOIDP)
gufunctype = lltype.Ptr(GenericUfunc)
@@ -265,9 +291,13 @@
funcs_w = [None] * ntypes
dtypes_w = [None] * ntypes * (nin + nout)
for i in range(ntypes):
- funcs_w[i] = space.wrap(funcs[i])
- #print 'function',i,'is',funcs[i], hex(rffi.cast(lltype.Signed, funcs[i]))
+ funcs_w[i] = W_GenericUFuncCaller(funcs[i])
for i in range(ntypes*(nin+nout)):
dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])]
- return ufuncs.frompyfunc(space, space.newlist(funcs_w), nin, nout, dtypes_w,
- signature, identity, name, doc)
+ w_funcs = space.newlist(funcs_w)
+ w_dtypes = space.newlist(dtypes_w)
+ w_signature = rffi.charp2str(signature)
+ w_doc = rffi.charp2str(doc)
+ w_name = rffi.charp2str(name)
+ return ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes,
+ w_signature, identity, w_name, w_doc)
diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py
--- a/pypy/module/cpyext/test/test_ndarrayobject.py
+++ b/pypy/module/cpyext/test/test_ndarrayobject.py
@@ -1,7 +1,7 @@
+import py
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from rpython.rtyper.lltypesystem import rffi, lltype
-
from pypy.module.micronumpy.ndarray import W_NDimArray
from pypy.module.micronumpy.descriptor import get_dtype_cache
@@ -213,14 +213,16 @@
assert res.get_scalar_value().real == 3.
assert res.get_scalar_value().imag == 4.
- def test_Ufunc_FromFuncAndDataAndSignature(self. space, api):
+ def _test_Ufunc_FromFuncAndDataAndSignature(self, space, api):
+ py.test.skip('preliminary non-translated test')
+ '''
PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2};
char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT };
void *array_data[] = {NULL, NULL};
ufunc = api._PyUFunc_FromFuncAndDataAndSignature(space, funcs, data,
types, ntypes, nin, nout, identity, doc, check_return,
signature)
-
+ '''
class AppTestNDArray(AppTestCpythonExtensionBase):
def test_ndarray_object_c(self):
From noreply at buildbot.pypy.org Wed Aug 13 00:01:17 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Wed, 13 Aug 2014 00:01:17 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: c function is called but crashes
Message-ID: <20140812220117.5122C1C0793@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72774:4f42ac64e652
Date: 2014-08-13 00:59 +0300
http://bitbucket.org/pypy/pypy/changeset/4f42ac64e652/
Log: c function is called but crashes
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -12,9 +12,12 @@
from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype
from pypy.module.micronumpy.concrete import ConcreteArray
from pypy.module.micronumpy import ufuncs
-from rpython.rlib.rawstorage import RAW_STORAGE_PTR
+from rpython.rlib.rawstorage import (RAW_STORAGE_PTR, raw_storage_getitem, raw_storage_setitem,
+ free_raw_storage, alloc_raw_storage)
+from rpython.rlib.rarithmetic import LONG_BIT, _get_bitsize
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.argument import Arguments
from pypy.interpreter.gateway import interp2app
NPY_C_CONTIGUOUS = 0x0001
@@ -257,31 +260,44 @@
order=order, owning=owning, w_subtype=w_subtype)
npy_intpp = rffi.LONGP
+LONG_SIZE = LONG_BIT / 8
+CCHARP_SIZE = _get_bitsize('P') / 8
+
class W_GenericUFuncCaller(W_Root):
def __init__(self, func):
self.func = func
def descr_call(self, space, __args__):
args_w, kwds_w = __args__.unpack()
- datap = rffi.CFixedArray(rffi.CCHARP, len(args_w))
- dim_p = rffi.CFixedArray(npy_intpp, len(args_w))
- stepp = rffi.CFixedArray(npy_intpp, len(args_w))
- data = rffi.VOIDP
- for i in len(args_w):
- arg_i = args[i]
+ dataps = alloc_raw_storage(CCHARP_SIZE * len(args_w), track_allocation=False)
+ dims = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False)
+ steps = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False)
+ user_data = None
+ for i in range(len(args_w)):
+ arg_i = args_w[i]
assert isinstance(arg_i, W_NDimArray)
- datap[i] = cffi.cast(rffi.CCHARP, args.implementation.storage)
+ raw_storage_setitem(dataps, CCHARP_SIZE * i, rffi.cast(rffi.CCHARP, arg_i.implementation.storage))
#This assumes we iterate over the last dimension?
- dim_p[i] = arg_i.get_shape()[0]
- stepp[i] = arg_i.get_strides()[0]
- space.call_args(self.func, datap, dim_p, stepp, data)
+ raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_shape()[0]))
+ raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.implementation.strides[0]))
+ try:
+ import pdb;pdb.set_trace()
+ self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps),
+ rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data)
+ except:
+ import traceback; traceback.print_exc()
+ raise
+ finally:
+ free_raw_storage(dataps, track_allocation=False)
+ free_raw_storage(dims, track_allocation=False)
+ free_raw_storage(steps, track_allocation=False)
W_GenericUFuncCaller.typedef = TypeDef("hiddenclass",
__call__ = interp2app(W_GenericUFuncCaller.descr_call),
)
GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp,
- rffi.VOIDP], rffi.VOIDP)
+ rffi.VOIDP], lltype.Void)
gufunctype = lltype.Ptr(GenericUfunc)
@cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t,
@@ -299,5 +315,6 @@
w_signature = rffi.charp2str(signature)
w_doc = rffi.charp2str(doc)
w_name = rffi.charp2str(name)
- return ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes,
+ ufunc_generic = ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes,
w_signature, identity, w_name, w_doc)
+ return ufunc_generic
From noreply at buildbot.pypy.org Wed Aug 13 00:20:11 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Wed, 13 Aug 2014 00:20:11 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: sys.exit() should produce a
SystemExit with code is None
Message-ID: <20140812222011.EF84E1C0157@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes2
Changeset: r72775:4558aef78acc
Date: 2014-08-12 21:34 +0200
http://bitbucket.org/pypy/pypy/changeset/4558aef78acc/
Log: sys.exit() should produce a SystemExit with code is None
diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py
--- a/pypy/module/exceptions/test/test_exc.py
+++ b/pypy/module/exceptions/test/test_exc.py
@@ -127,6 +127,24 @@
assert SystemExit("x").code == "x"
assert SystemExit(1, 2).code == (1, 2)
+ def test_sys_exit(self):
+ import sys
+
+ exc = raises(SystemExit, sys.exit)
+ assert exc.value.code is None
+
+ exc = raises(SystemExit, sys.exit, 0)
+ assert exc.value.code == 0
+
+ exc = raises(SystemExit, sys.exit, 1)
+ assert exc.value.code == 1
+
+ exc = raises(SystemExit, sys.exit, 2)
+ assert exc.value.code == 2
+
+ exc = raises(SystemExit, sys.exit, (1, 2, 3))
+ assert exc.value.code == (1, 2, 3)
+
def test_str_unicode(self):
e = ValueError('àèì')
assert str(e) == 'àèì'
diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py
--- a/pypy/module/sys/app.py
+++ b/pypy/module/sys/app.py
@@ -49,7 +49,7 @@
except:
return False # got an exception again... ignore, report the original
-def exit(exitcode=0):
+def exit(exitcode=None):
"""Exit the interpreter by raising SystemExit(exitcode).
If the exitcode is omitted or None, it defaults to zero (i.e., success).
If the exitcode is numeric, it will be used as the system exit status.
From noreply at buildbot.pypy.org Wed Aug 13 00:20:13 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Wed, 13 Aug 2014 00:20:13 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2
(pull request #267)
Message-ID: <20140812222013.6270D1C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72776:a52dc76c7d2f
Date: 2014-08-12 15:19 -0700
http://bitbucket.org/pypy/pypy/changeset/a52dc76c7d2f/
Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #267)
sys.exit() should produce a SystemExit with code is None
diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py
--- a/pypy/module/exceptions/test/test_exc.py
+++ b/pypy/module/exceptions/test/test_exc.py
@@ -127,6 +127,24 @@
assert SystemExit("x").code == "x"
assert SystemExit(1, 2).code == (1, 2)
+ def test_sys_exit(self):
+ import sys
+
+ exc = raises(SystemExit, sys.exit)
+ assert exc.value.code is None
+
+ exc = raises(SystemExit, sys.exit, 0)
+ assert exc.value.code == 0
+
+ exc = raises(SystemExit, sys.exit, 1)
+ assert exc.value.code == 1
+
+ exc = raises(SystemExit, sys.exit, 2)
+ assert exc.value.code == 2
+
+ exc = raises(SystemExit, sys.exit, (1, 2, 3))
+ assert exc.value.code == (1, 2, 3)
+
def test_str_unicode(self):
e = ValueError('àèì')
assert str(e) == 'àèì'
diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py
--- a/pypy/module/sys/app.py
+++ b/pypy/module/sys/app.py
@@ -49,7 +49,7 @@
except:
return False # got an exception again... ignore, report the original
-def exit(exitcode=0):
+def exit(exitcode=None):
"""Exit the interpreter by raising SystemExit(exitcode).
If the exitcode is omitted or None, it defaults to zero (i.e., success).
If the exitcode is numeric, it will be used as the system exit status.
From noreply at buildbot.pypy.org Wed Aug 13 00:39:45 2014
From: noreply at buildbot.pypy.org (wenzhuman)
Date: Wed, 13 Aug 2014 00:39:45 +0200 (CEST)
Subject: [pypy-commit] pypy gc_no_cleanup_nursery: clean out some unused
code and insert zero_gc_ptr after GcArray malloc
Message-ID: <20140812223945.7D3351C0547@cobra.cs.uni-duesseldorf.de>
Author: wenzhuman
Branch: gc_no_cleanup_nursery
Changeset: r72777:c6e682e7221d
Date: 2014-08-08 16:04 -0700
http://bitbucket.org/pypy/pypy/changeset/c6e682e7221d/
Log: clean out some unused code and insert zero_gc_ptr after GcArray
malloc
diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py
--- a/rpython/translator/exceptiontransform.py
+++ b/rpython/translator/exceptiontransform.py
@@ -11,7 +11,6 @@
from rpython.rtyper.rmodel import inputconst
from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong
from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat
-from rpython.rlib.debug import ll_assert
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator
from rpython.tool.sourcetools import func_with_new_name
@@ -270,9 +269,6 @@
if need_exc_matching:
assert lastblock.exitswitch == c_last_exception
if not self.raise_analyzer.can_raise(lastblock.operations[-1]):
- #print ("operation %s cannot raise, but has exception"
- # " guarding in graph %s" % (lastblock.operations[-1],
- # graph))
lastblock.exitswitch = None
lastblock.recloseblock(lastblock.exits[0])
lastblock.exits[0].exitcase = None
@@ -393,10 +389,6 @@
return newgraph, SpaceOperation("direct_call", [fptr] + callargs, op.result)
def gen_exc_check(self, block, returnblock, normalafterblock=None):
- #var_exc_occured = Variable()
- #var_exc_occured.concretetype = lltype.Bool
- #block.operations.append(SpaceOperation("safe_call", [self.rpyexc_occured_ptr], var_exc_occured))
-
llops = rtyper.LowLevelOpList(None)
spaceop = block.operations[-1]
@@ -425,9 +417,8 @@
l0.exitcase = l0.llexitcase = True
block.recloseblock(l0, l)
-
insert_zeroing_op = False
- if spaceop.opname == 'malloc':
+ if spaceop.opname in ['malloc','malloc_varsize']:
flavor = spaceop.args[1].value['flavor']
if flavor == 'gc':
insert_zeroing_op = True
From noreply at buildbot.pypy.org Wed Aug 13 00:39:46 2014
From: noreply at buildbot.pypy.org (wenzhuman)
Date: Wed, 13 Aug 2014 00:39:46 +0200 (CEST)
Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add import
Message-ID: <20140812223946.B7CFC1C0793@cobra.cs.uni-duesseldorf.de>
Author: wenzhuman
Branch: gc_no_cleanup_nursery
Changeset: r72778:05756b433478
Date: 2014-08-11 16:28 -0700
http://bitbucket.org/pypy/pypy/changeset/05756b433478/
Log: add import
diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py
--- a/rpython/translator/exceptiontransform.py
+++ b/rpython/translator/exceptiontransform.py
@@ -11,6 +11,7 @@
from rpython.rtyper.rmodel import inputconst
from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong
from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat
+from rpython.rlib.debug import ll_assert
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator
from rpython.tool.sourcetools import func_with_new_name
From noreply at buildbot.pypy.org Wed Aug 13 00:39:47 2014
From: noreply at buildbot.pypy.org (wenzhuman)
Date: Wed, 13 Aug 2014 00:39:47 +0200 (CEST)
Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add tests
Message-ID: <20140812223947.E210F1C0793@cobra.cs.uni-duesseldorf.de>
Author: wenzhuman
Branch: gc_no_cleanup_nursery
Changeset: r72779:21a70c2f9848
Date: 2014-08-11 18:42 -0700
http://bitbucket.org/pypy/pypy/changeset/21a70c2f9848/
Log: add tests
diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py
--- a/rpython/memory/gc/test/test_direct.py
+++ b/rpython/memory/gc/test/test_direct.py
@@ -107,7 +107,7 @@
def malloc(self, TYPE, n=None):
addr = self.gc.malloc(self.get_type_id(TYPE), n, zero=True)
obj_ptr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
- #TODO: only zero fields if there is gc filed add something like has_gc_ptr()
+
if not self.gc.malloc_zero_filled:
zero_gc_pointers_inside(obj_ptr, TYPE)
return obj_ptr
@@ -667,9 +667,25 @@
class TestIncrementalMiniMarkGCFull(DirectGCTest):
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
- def test_no_cleanup(self):
+ def test_malloc_fixedsize_no_cleanup(self):
p = self.malloc(S)
import pytest
with pytest.raises(lltype.UninitializedMemoryAccess):
x1 = p.x
-
\ No newline at end of file
+ assert p.prev == lltype.nullptr(S)
+ assert p.next == lltype.nullptr(S)
+
+ def test_malloc_varsize_no_cleanup(self):
+ x = lltype.Signed
+ VAR1 = lltype.GcArray(x)
+ p = self.malloc(VAR1,5)
+ import pytest
+ with pytest.raises(lltype.UninitializedMemoryAccess):
+ x1 = p[0]
+
+ def test_malloc_varsize_no_cleanup2(self):
+ p = self.malloc(VAR,100)
+ for i in range(100):
+ assert p[0] == lltype.nullptr(S)
+ assert False
+
diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -1,6 +1,7 @@
import py
import inspect
+from rpython.rlib.objectmodel import compute_hash, compute_identity_hash
from rpython.translator.c import gc
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr
@@ -13,6 +14,7 @@
from rpython.conftest import option
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import LONG_BIT
+import pdb
WORD = LONG_BIT // 8
@@ -154,7 +156,6 @@
class GenericGCTests(GCTest):
GC_CAN_SHRINK_ARRAY = False
-
def define_instances(cls):
class A(object):
pass
@@ -709,7 +710,6 @@
GC_CAN_MOVE = True
GC_CAN_MALLOC_NONMOVABLE = False
GC_CAN_TEST_ID = False
-
def define_many_ids(cls):
class A(object):
pass
@@ -1118,6 +1118,7 @@
def test_adr_of_nursery(self):
run = self.runner("adr_of_nursery")
res = run([])
+
class TestGenerationalNoFullCollectGC(GCTest):
# test that nursery is doing its job and that no full collection
@@ -1178,7 +1179,7 @@
'large_object': 8*WORD,
'translated_to_c': False}
root_stack_depth = 200
-
+
def define_ref_from_rawmalloced_to_regular(cls):
import gc
S = lltype.GcStruct('S', ('x', lltype.Signed))
@@ -1232,8 +1233,7 @@
def test_malloc_nonmovable_fixsize(self):
py.test.skip("not supported")
-
-
+
class TestMiniMarkGC(TestHybridGC):
gcname = "minimark"
GC_CAN_TEST_ID = True
@@ -1250,7 +1250,7 @@
'translated_to_c': False,
}
root_stack_depth = 200
-
+
def define_no_clean_setarrayitems(cls):
# The optimization find_clean_setarrayitems() in
# gctransformer/framework.py does not work with card marking.
@@ -1275,6 +1275,29 @@
run = self.runner("no_clean_setarrayitems")
res = run([])
assert res == 123
+
+ def define_nursery_hash_base(cls):
+ class A:
+ pass
+ def fn():
+ objects = []
+ hashes = []
+ for i in range(200):
+ rgc.collect(0) # nursery-only collection, if possible
+ obj = A()
+ objects.append(obj)
+ hashes.append(compute_identity_hash(obj))
+ unique = {}
+ for i in range(len(objects)):
+ assert compute_identity_hash(objects[i]) == hashes[i]
+ unique[hashes[i]] = None
+ return len(unique)
+ return fn
+
+ def test_nursery_hash_base(self):
+ res = self.runner('nursery_hash_base')
+ assert res >= 195
+ assert False
class TestIncrementalMiniMarkGC(TestMiniMarkGC):
gcname = "incminimark"
@@ -1292,8 +1315,58 @@
'translated_to_c': False,
}
root_stack_depth = 200
+
+ def define_malloc_array_of_gcptr(self):
+ S = lltype.GcStruct('S', ('x', lltype.Signed))
+ A = lltype.GcArray(lltype.Ptr(S))
+ def f():
+ lst = lltype.malloc(A, 5, zero= False)
+ return (lst[0] == lltype.nullptr(S)
+ and lst[1] == lltype.nullptr(S)
+ and lst[2] == lltype.nullptr(S)
+ and lst[3] == lltype.nullptr(S)
+ and lst[4] == lltype.nullptr(S))
+ return f
+
+ def test_malloc_array_of_gcptr(self):
+ run = self.runner('malloc_array_of_gcptr')
+ res = run([])
+ assert not res
+ '''
+ def define_malloc_struct_of_gcptr(cls):
+ S1 = lltype.GcStruct('S', ('x', lltype.Signed))
+ S = lltype.GcStruct('S',
+ ('x', lltype.Signed),
+ ('filed1', lltype.Ptr(S1)),
+ ('filed2', lltype.Ptr(S1)))
+ s0 = lltype.malloc(S)
+ def f():
+ return (s0.filed1 == lltype.nullptr(S1) and s0.filed2 == lltype.nullptr(S1))
+ return f
+ def test_malloc_struct_of_gcptr(self):
+ run = self.runner("malloc_struct_of_gcptr")
+ res = run([])
+ assert res
+ '''
+ '''
+ def define_malloc_struct_of_gcptr(cls):
+ S = lltype.GcForwardReference()
+ S.become(lltype.GcStruct('S',
+ ('x', lltype.Signed),
+ ('prev', lltype.Ptr(S)),
+ ('next', lltype.Ptr(S))))
+ s0 = lltype.malloc(S,zero = False)
+ def f():
+ return s0.next == lltype.nullptr(S)
+ return f
+ def test_malloc_struct_of_gcptr(self):
+ run = self.runner("malloc_struct_of_gcptr")
+ pdb.set_trace()
+ res = run([])
+ assert res
+ '''
# ________________________________________________________________
# tagged pointers
From noreply at buildbot.pypy.org Wed Aug 13 00:39:49 2014
From: noreply at buildbot.pypy.org (wenzhuman)
Date: Wed, 13 Aug 2014 00:39:49 +0200 (CEST)
Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add test
Message-ID: <20140812223949.29E451C0793@cobra.cs.uni-duesseldorf.de>
Author: wenzhuman
Branch: gc_no_cleanup_nursery
Changeset: r72780:44d322ba289f
Date: 2014-08-12 15:37 -0700
http://bitbucket.org/pypy/pypy/changeset/44d322ba289f/
Log: add test
diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py
--- a/rpython/memory/gc/test/test_direct.py
+++ b/rpython/memory/gc/test/test_direct.py
@@ -12,6 +12,7 @@
from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int
from rpython.memory.gc import minimark, incminimark
from rpython.memory.gctypelayout import zero_gc_pointers_inside
+from rpython.rlib.debug import debug_print
WORD = LONG_BIT // 8
ADDR_ARRAY = lltype.Array(llmemory.Address)
@@ -670,8 +671,10 @@
def test_malloc_fixedsize_no_cleanup(self):
p = self.malloc(S)
import pytest
+ #ensure the memory is uninitialized
with pytest.raises(lltype.UninitializedMemoryAccess):
x1 = p.x
+ #ensure all the ptr fields are zeroed
assert p.prev == lltype.nullptr(S)
assert p.next == lltype.nullptr(S)
@@ -686,6 +689,52 @@
def test_malloc_varsize_no_cleanup2(self):
p = self.malloc(VAR,100)
for i in range(100):
+ print type(p[0])
assert p[0] == lltype.nullptr(S)
- assert False
+ def test_malloc_struct_of_ptr_arr(self):
+ S2 = lltype.GcForwardReference()
+ S2.become(lltype.GcStruct('S2',
+ ('gcptr_arr', VAR)))
+ s2 = self.malloc(S2)
+ s2.gcptr_arr = self.malloc(VAR,100)
+ for i in range(100):
+ assert s2.gcptr_arr[i] == lltype.nullptr(S)
+
+ def test_malloc_struct_of_ptr_struct(self):
+ S3 = lltype.GcForwardReference()
+ S3.become(lltype.GcStruct('S3',
+ ('gcptr_struct', S),
+ ('prev', lltype.Ptr(S)),
+ ('next', lltype.Ptr(S))))
+ s3 = self.malloc(S3)
+ assert s3.gcptr_struct.prev == lltype.nullptr(S)
+ assert s3.gcptr_struct.next == lltype.nullptr(S)
+
+ def test_malloc_array_of_ptr_struct(self):
+ ARR_OF_PTR_STRUCT = lltype.GcArray(lltype.Ptr(S))
+ arr_of_ptr_struct = self.malloc(ARR_OF_PTR_STRUCT,5)
+ for i in range(5):
+ assert arr_of_ptr_struct[i] == lltype.nullptr(S)
+ assert arr_of_ptr_struct[i] == lltype.nullptr(S)
+ arr_of_ptr_struct[i] = self.malloc(S)
+ assert arr_of_ptr_struct[i].prev == lltype.nullptr(S)
+ assert arr_of_ptr_struct[i].next == lltype.nullptr(S)
+
+
+ def test_malloc_array_of_ptr_arr(self):
+ ARR_OF_PTR_ARR = lltype.GcArray(lltype.Ptr(lltype.GcArray(lltype.Ptr(S))))
+ arr_of_ptr_arr = lltype.malloc(ARR_OF_PTR_ARR, 10)
+ for i in range(10):
+ assert arr_of_ptr_arr[i] == lltype.nullptr(lltype.GcArray(lltype.Ptr(S)))
+ for i in range(10):
+ arr_of_ptr_arr[i] = self.malloc(lltype.GcArray(lltype.Ptr(S)), i)
+ debug_print (arr_of_ptr_arr[i])
+ for elem in arr_of_ptr_arr[i]:
+ debug_print(elem)
+ assert elem == lltype.nullptr(S)
+ elem = self.malloc(S)
+ #assert elem.prev == lltype.nullptr(S)
+ #assert elem.next == lltype.nullptr(S)
+
+
\ No newline at end of file
From noreply at buildbot.pypy.org Wed Aug 13 01:44:58 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Wed, 13 Aug 2014 01:44:58 +0200 (CEST)
Subject: [pypy-commit] pypy py3k: merge default
Message-ID: <20140812234458.811281C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3k
Changeset: r72781:92b4b658ae4b
Date: 2014-08-12 16:43 -0700
http://bitbucket.org/pypy/pypy/changeset/92b4b658ae4b/
Log: merge default
diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py
--- a/lib_pypy/_tkinter/__init__.py
+++ b/lib_pypy/_tkinter/__init__.py
@@ -30,6 +30,10 @@
return TkApp(screenName, className,
interactive, wantobjects, wantTk, sync, use)
+def dooneevent(flags=0):
+ return tklib.Tcl_DoOneEvent(flags)
+
+
def _flatten(item):
def _flatten1(output, item, depth):
if depth > 1000:
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py
--- a/rpython/jit/backend/x86/callbuilder.py
+++ b/rpython/jit/backend/x86/callbuilder.py
@@ -1,8 +1,9 @@
+import sys
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
from rpython.rlib.objectmodel import we_are_translated
from rpython.jit.metainterp.history import INT, FLOAT
from rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32,
- PASS_ON_MY_FRAME)
+ PASS_ON_MY_FRAME, FRAME_FIXED_SIZE)
from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi,
r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG,
@@ -15,6 +16,8 @@
# Same for gcc 4.5.0, better safe than sorry
CALL_ALIGN = 16 // WORD
+stdcall_or_cdecl = sys.platform == "win32"
+
def align_stack_words(words):
return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1)
@@ -44,11 +47,6 @@
self.stack_max = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS
assert self.stack_max >= 3
- def emit_raw_call(self):
- self.mc.CALL(self.fnloc)
- if self.callconv != FFI_DEFAULT_ABI:
- self.current_esp += self._fix_stdcall(self.callconv)
-
def subtract_esp_aligned(self, count):
if count > 0:
align = align_stack_words(count)
@@ -246,6 +244,28 @@
self.fnloc = RawEspLoc(p - WORD, INT)
+ def emit_raw_call(self):
+ if stdcall_or_cdecl and self.is_call_release_gil:
+ # Dynamically accept both stdcall and cdecl functions.
+ # We could try to detect from pyjitpl which calling
+ # convention this particular function takes, which would
+ # avoid these two extra MOVs... but later. The ebp register
+ # is unused here: it will be reloaded from the shadowstack.
+ # (This doesn't work during testing, though. Hack hack hack.)
+ save_ebp = not self.asm.cpu.gc_ll_descr.is_shadow_stack()
+ ofs = WORD * (FRAME_FIXED_SIZE - 1)
+ if save_ebp: # only for testing (or with Boehm)
+ self.mc.MOV_sr(ofs, ebp.value)
+ self.mc.MOV(ebp, esp)
+ self.mc.CALL(self.fnloc)
+ self.mc.MOV(esp, ebp)
+ if save_ebp: # only for testing (or with Boehm)
+ self.mc.MOV_rs(ebp.value, ofs)
+ else:
+ self.mc.CALL(self.fnloc)
+ if self.callconv != FFI_DEFAULT_ABI:
+ self.current_esp += self._fix_stdcall(self.callconv)
+
def _fix_stdcall(self, callconv):
from rpython.rlib.clibffi import FFI_STDCALL
assert callconv == FFI_STDCALL
@@ -417,8 +437,9 @@
remap_frame_layout(self.asm, src_locs, dst_locs, X86_64_SCRATCH_REG)
- def _fix_stdcall(self, callconv):
- assert 0 # should not occur on 64-bit
+ def emit_raw_call(self):
+ assert self.callconv == FFI_DEFAULT_ABI
+ self.mc.CALL(self.fnloc)
def load_result(self):
if self.restype == 'S':
diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py
--- a/rpython/jit/backend/x86/test/test_runner.py
+++ b/rpython/jit/backend/x86/test/test_runner.py
@@ -438,20 +438,26 @@
if WORD != 4:
py.test.skip("32-bit only test")
from rpython.jit.backend.x86.regloc import eax, edx
- from rpython.jit.backend.x86 import codebuf
+ from rpython.jit.backend.x86 import codebuf, callbuilder
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.rlib.libffi import types, clibffi
had_stdcall = hasattr(clibffi, 'FFI_STDCALL')
if not had_stdcall: # not running on Windows, but we can still test
monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False)
+ monkeypatch.setattr(callbuilder, 'stdcall_or_cdecl', True)
+ else:
+ assert callbuilder.stdcall_or_cdecl
#
- for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]:
+ for real_ffi, reported_ffi in [
+ (clibffi.FFI_DEFAULT_ABI, clibffi.FFI_DEFAULT_ABI),
+ (clibffi.FFI_STDCALL, clibffi.FFI_DEFAULT_ABI),
+ (clibffi.FFI_STDCALL, clibffi.FFI_STDCALL)]:
cpu = self.cpu
mc = codebuf.MachineCodeBlockWrapper()
mc.MOV_rs(eax.value, 4) # argument 1
mc.MOV_rs(edx.value, 40) # argument 10
mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10
- if ffi == clibffi.FFI_DEFAULT_ABI:
+ if real_ffi == clibffi.FFI_DEFAULT_ABI:
mc.RET()
else:
mc.RET16_i(40)
@@ -459,7 +465,7 @@
#
calldescr = cpu._calldescr_dynamic_for_tests([types.slong] * 10,
types.slong)
- calldescr.get_call_conv = lambda: ffi # <==== hack
+ calldescr.get_call_conv = lambda: reported_ffi # <==== hack
# ^^^ we patch get_call_conv() so that the test also makes sense
# on Linux, because clibffi.get_call_conv() would always
# return FFI_DEFAULT_ABI on non-Windows platforms.
From noreply at buildbot.pypy.org Wed Aug 13 01:45:01 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Wed, 13 Aug 2014 01:45:01 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: merge py3k
Message-ID: <20140812234501.8C2B61C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72782:ed05d05aefb3
Date: 2014-08-12 16:44 -0700
http://bitbucket.org/pypy/pypy/changeset/ed05d05aefb3/
Log: merge py3k
diff too long, truncating to 2000 out of 11009 lines
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
#
-__version__ = '2.2.4.dev2'
+__version__ = '2.5.2'
diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_argcomplete.py
@@ -0,0 +1,104 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code.
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn"
+instead of the default "dirname ":
+
+ optparser.add_argument(Config._file_or_dir, nargs='*'
+ ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+ # PYTHON_ARGCOMPLETE_OK
+so the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+ # PYTHON_ARGCOMPLETE_OK
+ near the top of the main python entry point
+- include in the file calling parse_args():
+ from _argcomplete import try_argcomplete, filescompleter
+ , call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+ completers):
+ export _ARC_DEBUG=1
+- run:
+ python-argcomplete-check-easy-install-script $(which appname)
+ echo $?
+ will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+ _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+ which should throw a KeyError: 'COMPLINE' (which is properly set by the
+ global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+ 'Fast file completer class'
+ def __init__(self, directories=True):
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ """only called on non option completions"""
+ if os.path.sep in prefix[1:]: #
+ prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+ else:
+ prefix_dir = 0
+ completion = []
+ globbed = []
+ if '*' not in prefix and '?' not in prefix:
+ if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash
+ globbed.extend(glob(prefix + '.*'))
+ prefix += '*'
+ globbed.extend(glob(prefix))
+ for x in sorted(globbed):
+ if os.path.isdir(x):
+ x += '/'
+ # append stripping the prefix (like bash, not like compgen)
+ completion.append(x[prefix_dir:])
+ return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+ # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format
+ if sys.version_info[:2] < (2, 6):
+ sys.exit(1)
+ try:
+ import argcomplete.completers
+ except ImportError:
+ sys.exit(-1)
+ filescompleter = FastFilesCompleter()
+
+ def try_argcomplete(parser):
+ argcomplete.autocomplete(parser)
+else:
+ def try_argcomplete(parser): pass
+ filescompleter = None
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -3,7 +3,6 @@
"""
import py
import sys
-import pytest
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
@@ -19,8 +18,8 @@
to provide assert expression information. """)
group.addoption('--no-assert', action="store_true", default=False,
dest="noassert", help="DEPRECATED equivalent to --assert=plain")
- group.addoption('--nomagic', action="store_true", default=False,
- dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
+ group.addoption('--nomagic', '--no-magic', action="store_true",
+ default=False, help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
@@ -35,22 +34,25 @@
mode = "plain"
if mode == "rewrite":
try:
- import ast
+ import ast # noqa
except ImportError:
mode = "reinterp"
else:
- if sys.platform.startswith('java'):
+ # Both Jython and CPython 2.6.0 have AST bugs that make the
+ # assertion rewriting hook malfunction.
+ if (sys.platform.startswith('java') or
+ sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
- reinterpret.AssertionError)
+ reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
- hook = rewrite.AssertionRewritingHook()
- sys.meta_path.append(hook)
+ hook = rewrite.AssertionRewritingHook() # noqa
+ sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
@@ -73,9 +75,16 @@
def callbinrepr(op, left, right):
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
+
for new_expl in hook_result:
if new_expl:
- res = '\n~'.join(new_expl)
+ # Don't include pageloads of data unless we are very
+ # verbose (-vv)
+ if (sum(len(p) for p in new_expl[1:]) > 80*8
+ and item.config.option.verbose < 2):
+ new_expl[1:] = [py.builtin._totext(
+ 'Detailed information truncated, use "-vv" to show')]
+ res = py.builtin._totext('\n~').join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
# The result will be fed back a python % formatting
# operation, which will fail if there are extraneous
@@ -95,9 +104,9 @@
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
- from _pytest.assertion import reinterpret
+ from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
- from _pytest.assertion import rewrite
+ from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -11,7 +11,7 @@
from _pytest.assertion.reinterpret import BuiltinAssertionError
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+if sys.platform.startswith("java"):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -526,10 +526,13 @@
# example:
def f():
return 5
+
def g():
return 3
+
def h(x):
return 'never'
+
check("f() * g() == 5")
check("not f()")
check("not (f() and g() or 0)")
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,18 +1,26 @@
import sys
import py
from _pytest.assertion.util import BuiltinAssertionError
+u = py.builtin._totext
+
class AssertionError(BuiltinAssertionError):
def __init__(self, *args):
BuiltinAssertionError.__init__(self, *args)
if args:
+ # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+ # on Python2.7 and above we always get len(args) == 1
+ # with args[0] being the (x,y) tuple.
+ if len(args) > 1:
+ toprint = args
+ else:
+ toprint = args[0]
try:
- self.msg = str(args[0])
- except py.builtin._sysex:
- raise
- except:
- self.msg = "<[broken __repr__] %s at %0xd>" %(
- args[0].__class__, id(args[0]))
+ self.msg = u(toprint)
+ except Exception:
+ self.msg = u(
+ "<[broken __repr__] %s at %0xd>"
+ % (toprint.__class__, id(toprint)))
else:
f = py.code.Frame(sys._getframe(1))
try:
@@ -44,4 +52,3 @@
from _pytest.assertion.newinterpret import interpret as reinterpret
else:
reinterpret = reinterpret_old
-
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -6,6 +6,7 @@
import imp
import marshal
import os
+import re
import struct
import sys
import types
@@ -14,13 +15,7 @@
from _pytest.assertion import util
-# Windows gives ENOENT in places *nix gives ENOTDIR.
-if sys.platform.startswith("win"):
- PATH_COMPONENT_NOT_DIR = errno.ENOENT
-else:
- PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
-
-# py.test caches rewritten pycs in __pycache__.
+# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
@@ -34,17 +29,19 @@
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
-PYC_EXT = ".py" + "c" if __debug__ else "o"
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertionRewritingHook(object):
- """Import hook which rewrites asserts."""
+ """PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.session = None
self.modules = {}
+ self._register_with_pkg_resources()
def set_session(self, session):
self.fnpats = session.config.getini("python_files")
@@ -59,8 +56,12 @@
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
- if path is not None and len(path) == 1:
- pth = path[0]
+ if path is not None:
+ # Starting with Python 3.3, path is a _NamespacePath(), which
+ # causes problems if not converted to list.
+ path = list(path)
+ if len(path) == 1:
+ pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
@@ -95,12 +96,13 @@
finally:
self.session = sess
else:
- state.trace("matched test file (was specified on cmdline): %r" % (fn,))
+ state.trace("matched test file (was specified on cmdline): %r" %
+ (fn,))
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
- # concurrent py.test processes rewriting and loading pycs. To avoid
+ # concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
@@ -116,19 +118,19 @@
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
- elif e == PATH_COMPONENT_NOT_DIR:
+ elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e == errno.EACCES:
- state.trace("read only directory: %r" % (fn_pypath.dirname,))
+ state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
- # Notice that even if we're in a read-only directory, I'm going to check
- # for a cached pyc. This may not be optimal...
+ # Notice that even if we're in a read-only directory, I'm going
+ # to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
state.trace("rewriting %r" % (fn,))
@@ -153,27 +155,59 @@
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
+ mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
del sys.modules[name]
raise
return sys.modules[name]
-def _write_pyc(co, source_path, pyc):
- # Technically, we don't have to have the same pyc format as (C)Python, since
- # these "pycs" should never be seen by builtin import. However, there's
- # little reason deviate, and I hope sometime to be able to use
- # imp.load_compiled to load them. (See the comment in load_module above.)
+
+
+ def is_package(self, name):
+ try:
+ fd, fn, desc = imp.find_module(name)
+ except ImportError:
+ return False
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ return tp == imp.PKG_DIRECTORY
+
+ @classmethod
+ def _register_with_pkg_resources(cls):
+ """
+ Ensure package resources can be loaded from this loader. May be called
+ multiple times, as the operation is idempotent.
+ """
+ try:
+ import pkg_resources
+ # access an attribute in case a deferred importer is present
+ pkg_resources.__name__
+ except ImportError:
+ return
+
+ # Since pytest tests are always located in the file system, the
+ # DefaultProvider is appropriate.
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+
+def _write_pyc(state, co, source_path, pyc):
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+ # import. However, there's little reason deviate, and I hope
+ # sometime to be able to use imp.load_compiled to load them. (See
+ # the comment in load_module above.)
mtime = int(source_path.mtime())
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
- if err == PATH_COMPONENT_NOT_DIR:
- # This happens when we get a EEXIST in find_module creating the
- # __pycache__ directory and __pycache__ is by some non-dir node.
- return False
- raise
+ state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, __pycache__ being a
+ # file etc.
+ return False
try:
fp.write(imp.get_magic())
fp.write(struct.pack(">",
- ast.Add : "+",
- ast.Sub : "-",
- ast.Mult : "*",
- ast.Div : "/",
- ast.FloorDiv : "//",
- ast.Mod : "%",
- ast.Eq : "==",
- ast.NotEq : "!=",
- ast.Lt : "<",
- ast.LtE : "<=",
- ast.Gt : ">",
- ast.GtE : ">=",
- ast.Pow : "**",
- ast.Is : "is",
- ast.IsNot : "is not",
- ast.In : "in",
- ast.NotIn : "not in"
+ ast.BitOr: "|",
+ ast.BitXor: "^",
+ ast.BitAnd: "&",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.FloorDiv: "//",
+ ast.Mod: "%%", # escaped for string formatting
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.Lt: "<",
+ ast.LtE: "<=",
+ ast.Gt: ">",
+ ast.GtE: ">=",
+ ast.Pow: "**",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in"
}
@@ -341,7 +408,7 @@
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
- isinstance(item.value, ast.Str)):
+ isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
@@ -462,7 +529,8 @@
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
- variables = [ast.Name(name, ast.Store()) for name in self.variables]
+ variables = [ast.Name(name, ast.Store())
+ for name in self.variables]
clear = ast.Assign(variables, ast.Name("None", ast.Load()))
self.statements.append(clear)
# Fix line numbers.
@@ -471,11 +539,12 @@
return self.statements
def visit_Name(self, name):
- # Check if the name is local or not.
+ # Display the repr of the name if it's a local variable or
+ # _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [], None, None)
- globs = ast.Call(self.builtin("globals"), [], [], None, None)
- ops = [ast.In(), ast.IsNot()]
- test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+ inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+ dorepr = self.helper("should_repr_global_name", name)
+ test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
@@ -492,7 +561,8 @@
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
- self.on_failure.append(ast.If(cond, fail_inner, []))
+ # cond is set in a prior loop iteration below
+ self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
@@ -548,7 +618,8 @@
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+ new_call = ast.Call(new_func, new_args, new_kwargs,
+ new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
@@ -584,7 +655,7 @@
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
- # Use py.code._reprcompare if that's available.
+ # Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -1,8 +1,13 @@
"""Utilities for assertion debugging"""
import py
+try:
+ from collections import Sequence
+except ImportError:
+ Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
@@ -10,6 +15,7 @@
# DebugInterpreter.
_reprcompare = None
+
def format_explanation(explanation):
"""This formats an explanation
@@ -20,7 +26,18 @@
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
- # simplify 'assert False where False = ...'
+ explanation = _collapse_false(explanation)
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+ """Collapse expansions of False
+
+ So this strips out any "assert False\n{where False = ...\n}"
+ blocks.
+ """
where = 0
while True:
start = where = explanation.find("False\n{False = ", where)
@@ -42,28 +59,48 @@
explanation = (explanation[:start] + explanation[start+15:end-1] +
explanation[end+1:])
where -= 17
- raw_lines = (explanation or '').split('\n')
- # escape newlines not followed by {, } and ~
+ return explanation
+
+
+def _split_explanation(explanation):
+ """Return a list of individual lines in the explanation
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
+ return lines
+
+def _format_lines(lines):
+ """Format the individual lines
+
+ This will replace the '{', '}' and '~' characters of our mini
+ formatting language with the proper 'where ...', 'and ...' and ' +
+ ...' text, taking care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
- s = 'and '
+ s = u('and ')
else:
- s = 'where '
+ s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
- result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
@@ -71,9 +108,9 @@
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
- result.append(' '*len(stack) + line[1:])
+ result.append(u(' ')*len(stack) + line[1:])
assert len(stack) == 1
- return '\n'.join(result)
+ return result
# Provide basestring in python3
@@ -83,132 +120,163 @@
basestring = str
-def assertrepr_compare(op, left, right):
- """return specialised explanations for some operators/operands"""
- width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+def assertrepr_compare(config, op, left, right):
+ """Return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width/2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
- summary = '%s %s %s' % (left_repr, op, right_repr)
+ summary = u('%s %s %s') % (left_repr, op, right_repr)
- issequence = lambda x: isinstance(x, (list, tuple))
+ issequence = lambda x: (isinstance(x, (list, tuple, Sequence))
+ and not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
- isset = lambda x: isinstance(x, set)
+ isset = lambda x: isinstance(x, (set, frozenset))
+ verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
- explanation = _diff_text(left, right)
+ explanation = _diff_text(left, right, verbose)
elif issequence(left) and issequence(right):
- explanation = _compare_eq_sequence(left, right)
+ explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
- explanation = _compare_eq_set(left, right)
+ explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
- explanation = _diff_text(py.std.pprint.pformat(left),
- py.std.pprint.pformat(right))
+ explanation = _compare_eq_dict(left, right, verbose)
elif op == 'not in':
if istext(left) and istext(right):
- explanation = _notin_text(left, right)
- except py.builtin._sysex:
- raise
- except:
+ explanation = _notin_text(left, right, verbose)
+ except Exception:
excinfo = py.code.ExceptionInfo()
- explanation = ['(pytest_assertion plugin: representation of '
- 'details failed. Probably an object has a faulty __repr__.)',
- str(excinfo)
- ]
-
+ explanation = [
+ u('(pytest_assertion plugin: representation of details failed. '
+ 'Probably an object has a faulty __repr__.)'),
+ u(excinfo)]
if not explanation:
return None
- # Don't include pageloads of data, should be configurable
- if len(''.join(explanation)) > 80*8:
- explanation = ['Detailed information too verbose, truncated']
-
return [summary] + explanation
-def _diff_text(left, right):
- """Return the explanation for the diff between text
+def _diff_text(left, right, verbose=False):
+ """Return the explanation for the diff between text or bytes
- This will skip leading and trailing characters which are
- identical to keep the diff minimal.
+ Unless --verbose is used this will skip leading and trailing
+ characters which are identical to keep the diff minimal.
+
+ If the input are bytes they will be safely converted to text.
"""
explanation = []
- i = 0 # just in case left or right has zero length
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation = ['Skipping %s identical '
- 'leading characters in diff' % i]
- left = left[i:]
- right = right[i:]
- if len(left) == len(right):
- for i in range(len(left)):
- if left[-i] != right[-i]:
+ if isinstance(left, py.builtin.bytes):
+ left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+ if isinstance(right, py.builtin.bytes):
+ right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+ if not verbose:
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
break
if i > 42:
- i -= 10 # Provide some context
- explanation += ['Skipping %s identical '
- 'trailing characters in diff' % i]
- left = left[:-i]
- right = right[:-i]
+ i -= 10 # Provide some context
+ explanation = [u('Skipping %s identical leading '
+ 'characters in diff, use -v to show') % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += [u('Skipping %s identical trailing '
+ 'characters in diff, use -v to show') % i]
+ left = left[:-i]
+ right = right[:-i]
explanation += [line.strip('\n')
for line in py.std.difflib.ndiff(left.splitlines(),
right.splitlines())]
return explanation
-def _compare_eq_sequence(left, right):
+def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
- explanation += ['At index %s diff: %r != %r' %
- (i, left[i], right[i])]
+ explanation += [u('At index %s diff: %r != %r')
+ % (i, left[i], right[i])]
break
if len(left) > len(right):
- explanation += ['Left contains more items, '
- 'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+ explanation += [u('Left contains more items, first extra item: %s')
+ % py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
- explanation += ['Right contains more items, '
- 'first extra item: %s' % py.io.saferepr(right[len(left)],)]
- return explanation # + _diff_text(py.std.pprint.pformat(left),
- # py.std.pprint.pformat(right))
+ explanation += [
+ u('Right contains more items, first extra item: %s') %
+ py.io.saferepr(right[len(left)],)]
+ return explanation # + _diff_text(py.std.pprint.pformat(left),
+ # py.std.pprint.pformat(right))
-def _compare_eq_set(left, right):
+def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
- explanation.append('Extra items in the left set:')
+ explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
- explanation.append('Extra items in the right set:')
+ explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
-def _notin_text(term, text):
+def _compare_eq_dict(left, right, verbose=False):
+ explanation = []
+ common = set(left).intersection(set(right))
+ same = dict((k, left[k]) for k in common if left[k] == right[k])
+ if same and not verbose:
+ explanation += [u('Omitting %s identical items, use -v to show') %
+ len(same)]
+ elif same:
+ explanation += [u('Common items:')]
+ explanation += py.std.pprint.pformat(same).splitlines()
+ diff = set(k for k in common if left[k] != right[k])
+ if diff:
+ explanation += [u('Differing items:')]
+ for k in diff:
+ explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+ py.io.saferepr({k: right[k]})]
+ extra_left = set(left) - set(right)
+ if extra_left:
+ explanation.append(u('Left contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, left[k]) for k in extra_left)).splitlines())
+ extra_right = set(right) - set(left)
+ if extra_right:
+ explanation.append(u('Right contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, right[k]) for k in extra_right)).splitlines())
+ return explanation
+
+
+def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
- diff = _diff_text(correct_text, text)
- newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+ diff = _diff_text(correct_text, text, verbose)
+ newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
- if line.startswith('Skipping'):
+ if line.startswith(u('Skipping')):
continue
- if line.startswith('- '):
+ if line.startswith(u('- ')):
continue
- if line.startswith('+ '):
- newdiff.append(' ' + line[2:])
+ if line.startswith(u('+ ')):
+ newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -1,43 +1,114 @@
-""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """
+"""
+ per-test stdout/stderr capturing mechanisms,
+ ``capsys`` and ``capfd`` function arguments.
+"""
+# note: this capture code was copied from py.io,
+# pylib 1.4.20.dev2 (rev 13d9af95547e)
+import sys
+import os
+import tempfile
-import pytest, py
-import os
+import py
+import pytest
+
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ class BytesIO(StringIO):
+ def write(self, data):
+ if isinstance(data, unicode):
+ raise TypeError("not a byte value: %r" % (data,))
+ StringIO.write(self, data)
+
+if sys.version_info < (3, 0):
+ class TextIO(StringIO):
+ def write(self, data):
+ if not isinstance(data, unicode):
+ enc = getattr(self, '_encoding', 'UTF-8')
+ data = unicode(data, enc, 'replace')
+ StringIO.write(self, data)
+else:
+ TextIO = StringIO
+
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
def pytest_addoption(parser):
group = parser.getgroup("general")
- group._addoption('--capture', action="store", default=None,
- metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+ group._addoption(
+ '--capture', action="store", default=None,
+ metavar="method", choices=['fd', 'sys', 'no'],
help="per-test capturing method: one of fd (default)|sys|no.")
- group._addoption('-s', action="store_const", const="no", dest="capture",
+ group._addoption(
+ '-s', action="store_const", const="no", dest="capture",
help="shortcut for --capture=no.")
+
@pytest.mark.tryfirst
-def pytest_cmdline_parse(pluginmanager, args):
- # we want to perform capturing already for plugin/conftest loading
- if '-s' in args or "--capture=no" in args:
- method = "no"
- elif hasattr(os, 'dup') and '--capture=sys' not in args:
+def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
+ ns = parser.parse_known_args(args)
+ method = ns.capture
+ if not method:
method = "fd"
- else:
+ if method == "fd" and not hasattr(os, "dup"):
method = "sys"
capman = CaptureManager(method)
- pluginmanager.register(capman, "capturemanager")
+ early_config.pluginmanager.register(capman, "capturemanager")
+
+ # make sure that capturemanager is properly reset at final shutdown
+ def teardown():
+ try:
+ capman.reset_capturings()
+ except ValueError:
+ pass
+
+ early_config.pluginmanager.add_shutdown(teardown)
+
+ # make sure logging does not raise exceptions at the end
+ def silence_logging_at_shutdown():
+ if "logging" in sys.modules:
+ sys.modules["logging"].raiseExceptions = False
+ early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown)
+
+ # finally trigger conftest loading but while capturing (issue93)
+ capman.resumecapture()
+ try:
+ try:
+ return __multicall__.execute()
+ finally:
+ out, err = capman.suspendcapture()
+ except:
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+ raise
+
def addouterr(rep, outerr):
for secname, content in zip(["out", "err"], outerr):
if content:
rep.sections.append(("Captured std%s" % secname, content))
+
class NoCapture:
def startall(self):
pass
+
def resume(self):
pass
+
def reset(self):
pass
+
def suspend(self):
return "", ""
+
class CaptureManager:
def __init__(self, defaultmethod=None):
self._method2capture = {}
@@ -45,21 +116,23 @@
def _maketempfile(self):
f = py.std.tempfile.TemporaryFile()
- newf = py.io.dupfile(f, encoding="UTF-8")
+ newf = dupfile(f, encoding="UTF-8")
f.close()
return newf
def _makestringio(self):
- return py.io.TextIO()
+ return TextIO()
def _getcapture(self, method):
if method == "fd":
- return py.io.StdCaptureFD(now=False,
- out=self._maketempfile(), err=self._maketempfile()
+ return StdCaptureFD(
+ out=self._maketempfile(),
+ err=self._maketempfile(),
)
elif method == "sys":
- return py.io.StdCapture(now=False,
- out=self._makestringio(), err=self._makestringio()
+ return StdCapture(
+ out=self._makestringio(),
+ err=self._makestringio(),
)
elif method == "no":
return NoCapture()
@@ -74,23 +147,24 @@
method = config._conftest.rget("option_capture", path=fspath)
except KeyError:
method = "fd"
- if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
+ if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
method = "sys"
return method
def reset_capturings(self):
- for name, cap in self._method2capture.items():
+ for cap in self._method2capture.values():
cap.reset()
def resumecapture_item(self, item):
method = self._getmethod(item.config, item.fspath)
if not hasattr(item, 'outerr'):
- item.outerr = ('', '') # we accumulate outerr on the item
+ item.outerr = ('', '') # we accumulate outerr on the item
return self.resumecapture(method)
def resumecapture(self, method=None):
if hasattr(self, '_capturing'):
- raise ValueError("cannot resume, already capturing with %r" %
+ raise ValueError(
+ "cannot resume, already capturing with %r" %
(self._capturing,))
if method is None:
method = self._defaultmethod
@@ -119,30 +193,29 @@
return "", ""
def activate_funcargs(self, pyfuncitem):
- if not hasattr(pyfuncitem, 'funcargs'):
- return
- assert not hasattr(self, '_capturing_funcargs')
- self._capturing_funcargs = capturing_funcargs = []
- for name, capfuncarg in pyfuncitem.funcargs.items():
- if name in ('capsys', 'capfd'):
- capturing_funcargs.append(capfuncarg)
- capfuncarg._start()
+ funcargs = getattr(pyfuncitem, "funcargs", None)
+ if funcargs is not None:
+ for name, capfuncarg in funcargs.items():
+ if name in ('capsys', 'capfd'):
+ assert not hasattr(self, '_capturing_funcarg')
+ self._capturing_funcarg = capfuncarg
+ capfuncarg._start()
def deactivate_funcargs(self):
- capturing_funcargs = getattr(self, '_capturing_funcargs', None)
- if capturing_funcargs is not None:
- while capturing_funcargs:
- capfuncarg = capturing_funcargs.pop()
- capfuncarg._finalize()
- del self._capturing_funcargs
+ capturing_funcarg = getattr(self, '_capturing_funcarg', None)
+ if capturing_funcarg:
+ outerr = capturing_funcarg._finalize()
+ del self._capturing_funcarg
+ return outerr
def pytest_make_collect_report(self, __multicall__, collector):
method = self._getmethod(collector.config, collector.fspath)
try:
self.resumecapture(method)
except ValueError:
- return # recursive collect, XXX refactor capturing
- # to allow for more lightweight recursive capturing
+ # recursive collect, XXX refactor capturing
+ # to allow for more lightweight recursive capturing
+ return
try:
rep = __multicall__.execute()
finally:
@@ -169,46 +242,371 @@
@pytest.mark.tryfirst
def pytest_runtest_makereport(self, __multicall__, item, call):
- self.deactivate_funcargs()
+ funcarg_outerr = self.deactivate_funcargs()
rep = __multicall__.execute()
outerr = self.suspendcapture(item)
- if not rep.passed:
- addouterr(rep, outerr)
+ if funcarg_outerr is not None:
+ outerr = (outerr[0] + funcarg_outerr[0],
+ outerr[1] + funcarg_outerr[1])
+ addouterr(rep, outerr)
if not rep.passed or rep.when == "teardown":
outerr = ('', '')
item.outerr = outerr
return rep
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
def pytest_funcarg__capsys(request):
"""enables capturing of writes to sys.stdout/sys.stderr and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
- return CaptureFuncarg(py.io.StdCapture)
+ if "capfd" in request._funcargs:
+ raise request.raiseerror(error_capsysfderror)
+ return CaptureFixture(StdCapture)
+
def pytest_funcarg__capfd(request):
"""enables capturing of writes to file descriptors 1 and 2 and makes
    captured output available via ``capfd.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
+ if "capsys" in request._funcargs:
+ request.raiseerror(error_capsysfderror)
if not hasattr(os, 'dup'):
- py.test.skip("capfd funcarg needs os.dup")
- return CaptureFuncarg(py.io.StdCaptureFD)
+ pytest.skip("capfd funcarg needs os.dup")
+ return CaptureFixture(StdCaptureFD)
-class CaptureFuncarg:
+
+class CaptureFixture:
def __init__(self, captureclass):
- self.capture = captureclass(now=False)
+ self._capture = captureclass()
def _start(self):
- self.capture.startall()
+ self._capture.startall()
def _finalize(self):
- if hasattr(self, 'capture'):
- self.capture.reset()
- del self.capture
+ if hasattr(self, '_capture'):
+ outerr = self._outerr = self._capture.reset()
+ del self._capture
+ return outerr
def readouterr(self):
- return self.capture.readouterr()
+ try:
+ return self._capture.readouterr()
+ except AttributeError:
+ return self._outerr
def close(self):
self._finalize()
+
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None, patchsys=False):
+ """ save targetfd descriptor, and open a new
+ temporary file there. If no tmpfile is
+ specified a tempfile.Tempfile() will be opened
+ in text mode.
+ """
+ self.targetfd = targetfd
+ if tmpfile is None and targetfd != 0:
+ f = tempfile.TemporaryFile('wb+')
+ tmpfile = dupfile(f, encoding="UTF-8")
+ f.close()
+ self.tmpfile = tmpfile
+ self._savefd = os.dup(self.targetfd)
+ if patchsys:
+ self._oldsys = getattr(sys, patchsysdict[targetfd])
+
+ def start(self):
+ try:
+ os.fstat(self._savefd)
+ except OSError:
+ raise ValueError(
+ "saved filedescriptor not valid, "
+ "did you call start() twice?")
+ if self.targetfd == 0 and not self.tmpfile:
+ fd = os.open(os.devnull, os.O_RDONLY)
+ os.dup2(fd, 0)
+ os.close(fd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+ else:
+ os.dup2(self.tmpfile.fileno(), self.targetfd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+ def done(self):
+ """ unpatch and clean up, returns the self.tmpfile (file object)
+ """
+ os.dup2(self._savefd, self.targetfd)
+ os.close(self._savefd)
+ if self.targetfd != 0:
+ self.tmpfile.seek(0)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+ return self.tmpfile
+
+ def writeorg(self, data):
+ """ write a string to the original file descriptor
+ """
+ tempfp = tempfile.TemporaryFile()
+ try:
+ os.dup2(self._savefd, tempfp.fileno())
+ tempfp.write(data)
+ finally:
+ tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+ """ return a new open file object that's a duplicate of f
+
+ mode is duplicated if not given, 'buffering' controls
+ buffer size (defaulting to no buffering) and 'raising'
+ defines whether an exception is raised when an incompatible
+ file object is passed in (if raising is False, the file
+ object itself will be returned)
+ """
+ try:
+ fd = f.fileno()
+ mode = mode or f.mode
+ except AttributeError:
+ if raising:
+ raise
+ return f
+ newfd = os.dup(fd)
+ if sys.version_info >= (3, 0):
+ if encoding is not None:
+ mode = mode.replace("b", "")
+ buffering = True
+ return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+ else:
+ f = os.fdopen(newfd, mode, buffering)
+ if encoding is not None:
+ return EncodedFile(f, encoding)
+ return f
+
+
+class EncodedFile(object):
+ def __init__(self, _stream, encoding):
+ self._stream = _stream
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding)
+ self._stream.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+
+class Capture(object):
+ def reset(self):
+ """ reset sys.stdout/stderr and return captured output as strings. """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already reset")
+ self._reset = True
+ outfile, errfile = self.done(save=False)
+ out, err = "", ""
+ if outfile and not outfile.closed:
+ out = outfile.read()
+ outfile.close()
+ if errfile and errfile != outfile and not errfile.closed:
+ err = errfile.read()
+ errfile.close()
+ return out, err
+
+ def suspend(self):
+ """ return current snapshot captures, memorize tempfiles. """
+ outerr = self.readouterr()
+ outfile, errfile = self.done()
+ return outerr
+
+
+class StdCaptureFD(Capture):
+ """ This class allows to capture writes to FD1 and FD2
+ and may connect a NULL file to FD0 (and prevent
+ reads from sys.stdin). If any of the 0,1,2 file descriptors
+ is invalid it will not be captured.
+ """
+ def __init__(self, out=True, err=True, in_=True, patchsys=True):
+ self._options = {
+ "out": out,
+ "err": err,
+ "in_": in_,
+ "patchsys": patchsys,
+ }
+ self._save()
+
+ def _save(self):
+ in_ = self._options['in_']
+ out = self._options['out']
+ err = self._options['err']
+ patchsys = self._options['patchsys']
+ if in_:
+ try:
+ self.in_ = FDCapture(
+ 0, tmpfile=None,
+ patchsys=patchsys)
+ except OSError:
+ pass
+ if out:
+ tmpfile = None
+ if hasattr(out, 'write'):
+ tmpfile = out
+ try:
+ self.out = FDCapture(
+ 1, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['out'] = self.out.tmpfile
+ except OSError:
+ pass
+ if err:
+ if hasattr(err, 'write'):
+ tmpfile = err
+ else:
+ tmpfile = None
+ try:
+ self.err = FDCapture(
+ 2, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['err'] = self.err.tmpfile
+ except OSError:
+ pass
+
+ def startall(self):
+ if hasattr(self, 'in_'):
+ self.in_.start()
+ if hasattr(self, 'out'):
+ self.out.start()
+ if hasattr(self, 'err'):
+ self.err.start()
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if hasattr(self, 'out') and not self.out.tmpfile.closed:
+ outfile = self.out.done()
+ if hasattr(self, 'err') and not self.err.tmpfile.closed:
+ errfile = self.err.done()
+ if hasattr(self, 'in_'):
+ self.in_.done()
+ if save:
+ self._save()
+ return outfile, errfile
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = self._readsnapshot('out')
+ err = self._readsnapshot('err')
+ return out, err
+
+ def _readsnapshot(self, name):
+ if hasattr(self, name):
+ f = getattr(self, name).tmpfile
+ else:
+ return ''
+
+ f.seek(0)
+ res = f.read()
+ enc = getattr(f, "encoding", None)
+ if enc:
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+
+class StdCapture(Capture):
+ """ This class allows to capture writes to sys.stdout|stderr "in-memory"
+ and will raise errors on tries to read from sys.stdin. It only
+ modifies sys.stdout|stderr|stdin attributes and does not
+ touch underlying File Descriptors (use StdCaptureFD for that).
+ """
+ def __init__(self, out=True, err=True, in_=True):
+ self._oldout = sys.stdout
+ self._olderr = sys.stderr
+ self._oldin = sys.stdin
+ if out and not hasattr(out, 'file'):
+ out = TextIO()
+ self.out = out
+ if err:
+ if not hasattr(err, 'write'):
+ err = TextIO()
+ self.err = err
+ self.in_ = in_
+
+ def startall(self):
+ if self.out:
+ sys.stdout = self.out
+ if self.err:
+ sys.stderr = self.err
+ if self.in_:
+ sys.stdin = self.in_ = DontReadFromInput()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if self.out and not self.out.closed:
+ sys.stdout = self._oldout
+ outfile = self.out
+ outfile.seek(0)
+ if self.err and not self.err.closed:
+ sys.stderr = self._olderr
+ errfile = self.err
+ errfile.seek(0)
+ if self.in_:
+ sys.stdin = self._oldin
+ return outfile, errfile
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = err = ""
+ if self.out:
+ out = self.out.getvalue()
+ self.out.truncate(0)
+ self.out.seek(0)
+ if self.err:
+ err = self.err.getvalue()
+ self.err.truncate(0)
+ self.err.seek(0)
+ return out, err
+
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+
+ def isatty(self):
+ return False
+
+ def close(self):
+ pass
diff --git a/_pytest/config.py b/_pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -1,25 +1,91 @@
""" command line options, ini-file and conftest.py processing. """
import py
+# DON'T import pytest here because it causes import cycle troubles
import sys, os
+from _pytest import hookspec # the extension point definitions
from _pytest.core import PluginManager
-import pytest
-def pytest_cmdline_parse(pluginmanager, args):
- config = Config(pluginmanager)
- config.parse(args)
- return config
+# pytest startup
-def pytest_unconfigure(config):
- while 1:
- try:
- fin = config._cleanup.pop()
- except IndexError:
- break
- fin()
+def main(args=None, plugins=None):
+ """ return exit code, after performing an in-process test run.
+
+ :arg args: list of command line arguments.
+
+ :arg plugins: list of plugin objects to be auto-registered during
+ initialization.
+ """
+ config = _prepareconfig(args, plugins)
+ return config.hook.pytest_cmdline_main(config=config)
+
+class cmdline: # compatibility namespace
+ main = staticmethod(main)
+
+class UsageError(Exception):
+ """ error in pytest usage or invocation"""
+
+_preinit = []
+
+default_plugins = (
+ "mark main terminal runner python pdb unittest capture skipping "
+ "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
+ "junitxml resultlog doctest").split()
+
+def _preloadplugins():
+ assert not _preinit
+ _preinit.append(get_plugin_manager())
+
+def get_plugin_manager():
+ if _preinit:
+ return _preinit.pop(0)
+ # subsequent calls to main will create a fresh instance
+ pluginmanager = PytestPluginManager()
+ pluginmanager.config = Config(pluginmanager) # XXX attr needed?
+ for spec in default_plugins:
+ pluginmanager.import_plugin(spec)
+ return pluginmanager
+
+def _prepareconfig(args=None, plugins=None):
+ if args is None:
+ args = sys.argv[1:]
+ elif isinstance(args, py.path.local):
+ args = [str(args)]
+ elif not isinstance(args, (tuple, list)):
+ if not isinstance(args, str):
+ raise ValueError("not a string or argument list: %r" % (args,))
+ args = py.std.shlex.split(args)
+ pluginmanager = get_plugin_manager()
+ if plugins:
+ for plugin in plugins:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+
+class PytestPluginManager(PluginManager):
+ def __init__(self, hookspecs=[hookspec]):
+ super(PytestPluginManager, self).__init__(hookspecs=hookspecs)
+ self.register(self)
+ if os.environ.get('PYTEST_DEBUG'):
+ err = sys.stderr
+ encoding = getattr(err, 'encoding', 'utf8')
+ try:
+ err = py.io.dupfile(err, encoding=encoding)
+ except Exception:
+ pass
+ self.trace.root.setwriter(err.write)
+
+ def pytest_configure(self, config):
+ config.addinivalue_line("markers",
+ "tryfirst: mark a hook implementation function such that the "
+ "plugin machinery will try to call it first/as early as possible.")
+ config.addinivalue_line("markers",
+ "trylast: mark a hook implementation function such that the "
+ "plugin machinery will try to call it last/as late as possible.")
+
class Parser:
- """ Parser for command line arguments. """
+ """ Parser for command line arguments and ini-file values. """
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
@@ -35,15 +101,17 @@
if option.dest:
self._processopt(option)
- def addnote(self, note):
- self._notes.append(note)
-
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
- :name: unique name of the option group.
+ :name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
+
+ The returned group object has an ``addoption`` method with the same
+ signature as :py:func:`parser.addoption
+ <_pytest.config.Parser.addoption>` but will be shown in the
+ respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
@@ -57,33 +125,222 @@
return group
def addoption(self, *opts, **attrs):
- """ add an optparse-style option. """
+ """ register a command line option.
+
+ :opts: option names, can be short or long options.
+ :attrs: same attributes which the ``add_option()`` function of the
+ `argparse library
+ `_
+ accepts.
+
+ After command line parsing options are available on the pytest config
+ object via ``config.option.NAME`` where ``NAME`` is usually set
+ by passing a ``dest`` attribute, for example
+ ``addoption("--long", dest="NAME", ...)``.
+ """
self._anonymous.addoption(*opts, **attrs)
def parse(self, args):
- self.optparser = optparser = MyOptionParser(self)
+ from _pytest._argcomplete import try_argcomplete
+ self.optparser = self._getparser()
+ try_argcomplete(self.optparser)
+ return self.optparser.parse_args([str(x) for x in args])
+
+ def _getparser(self):
+ from _pytest._argcomplete import filescompleter
+ optparser = MyOptionParser(self)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
- optgroup = py.std.optparse.OptionGroup(optparser, desc)
- optgroup.add_options(group.options)
- optparser.add_option_group(optgroup)
- return self.optparser.parse_args([str(x) for x in args])
+ arggroup = optparser.add_argument_group(desc)
+ for option in group.options:
+ n = option.names()
+ a = option.attrs()
+ arggroup.add_argument(*n, **a)
+ # bash like autocompletion for dirs (appending '/')
+ optparser.add_argument(FILE_OR_DIR, nargs='*'
+ ).completer=filescompleter
+ return optparser
def parse_setoption(self, args, option):
- parsedoption, args = self.parse(args)
+ parsedoption = self.parse(args)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
- return args
+ return getattr(parsedoption, FILE_OR_DIR)
+
+ def parse_known_args(self, args):
+ optparser = self._getparser()
+ args = [str(x) for x in args]
+ return optparser.parse_known_args(args)[0]
def addini(self, name, help, type=None, default=None):
- """ add an ini-file option with the given name and description. """
+ """ register an ini-file option.
+
+ :name: name of the ini-variable
+ :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``.
+ :default: default value if no ini-file option exists but is queried.
+
+ The value of ini-variables can be retrieved via a call to
+ :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
+ """
assert type in (None, "pathlist", "args", "linelist")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
+class ArgumentError(Exception):
+ """
+ Raised if an Argument instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+
+class Argument:
+ """class that mimics the necessary behaviour of py.std.optparse.Option """
+ _typ_map = {
+ 'int': int,
+ 'string': str,
+ }
+ # enable after some grace period for plugin writers
+ TYPE_WARN = False
+
+ def __init__(self, *names, **attrs):
+ """store parms in private vars for use in add_argument"""
+ self._attrs = attrs
+ self._short_opts = []
+ self._long_opts = []
+ self.dest = attrs.get('dest')
+ if self.TYPE_WARN:
+ try:
+ help = attrs['help']
+ if '%default' in help:
+ py.std.warnings.warn(
+ 'pytest now uses argparse. "%default" should be'
+ ' changed to "%(default)s" ',
+ FutureWarning,
+ stacklevel=3)
+ except KeyError:
+ pass
+ try:
+ typ = attrs['type']
+ except KeyError:
+ pass
+ else:
+ # this might raise a keyerror as well, don't want to catch that
+ if isinstance(typ, py.builtin._basestring):
+ if typ == 'choice':
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this is optional and when supplied '
+ ' should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ # argparse expects a type here take it from
+ # the type of the first element
+ attrs['type'] = type(attrs['choices'][0])
+ else:
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ attrs['type'] = Argument._typ_map[typ]
+ # used in test_parseopt -> test_parse_defaultgetter
+ self.type = attrs['type']
+ else:
+ self.type = typ
+ try:
+ # attribute existence is tested in Config._processopt
+ self.default = attrs['default']
+ except KeyError:
+ pass
+ self._set_opt_strings(names)
+ if not self.dest:
+ if self._long_opts:
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ try:
+ self.dest = self._short_opts[0][1:]
+ except IndexError:
+ raise ArgumentError(
+ 'need a long or short option', self)
+
+ def names(self):
+ return self._short_opts + self._long_opts
+
+ def attrs(self):
+ # update any attributes set by processopt
+ attrs = 'default dest help'.split()
+ if self.dest:
+ attrs.append(self.dest)
+ for attr in attrs:
+ try:
+ self._attrs[attr] = getattr(self, attr)
+ except AttributeError:
+ pass
+ if self._attrs.get('help'):
+ a = self._attrs['help']
+ a = a.replace('%default', '%(default)s')
+ #a = a.replace('%prog', '%(prog)s')
+ self._attrs['help'] = a
+ return self._attrs
+
+ def _set_opt_strings(self, opts):
+ """directly from optparse
+
+ might not be necessary as this is passed to argparse later on"""
+ for opt in opts:
+ if len(opt) < 2:
+ raise ArgumentError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise ArgumentError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise ArgumentError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def __repr__(self):
+ retval = 'Argument('
+ if self._short_opts:
+ retval += '_short_opts: ' + repr(self._short_opts) + ', '
+ if self._long_opts:
+ retval += '_long_opts: ' + repr(self._long_opts) + ', '
+ retval += 'dest: ' + repr(self.dest) + ', '
+ if hasattr(self, 'type'):
+ retval += 'type: ' + repr(self.type) + ', '
+ if hasattr(self, 'default'):
+ retval += 'default: ' + repr(self.default) + ', '
+ if retval[-2:] == ', ': # always long enough to test ("Argument(" )
+ retval = retval[:-2]
+ retval += ')'
+ return retval
+
+
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
@@ -92,12 +349,18 @@
self.parser = parser
def addoption(self, *optnames, **attrs):
- """ add an option to this group. """
- option = py.std.optparse.Option(*optnames, **attrs)
+ """ add an option to this group.
+
+ if a shortened version of a long option is specified it will
+ be suppressed in the help. addoption('--twowords', '--two-words')
+ results in help showing '--two-words' only, but --twowords gets
+ accepted **and** the automatic destination is in args.twowords
+ """
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
- option = py.std.optparse.Option(*optnames, **attrs)
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
From noreply at buildbot.pypy.org Wed Aug 13 01:55:21 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 13 Aug 2014 01:55:21 +0200 (CEST)
Subject: [pypy-commit] pypy.org extradoc: update the values
Message-ID: <20140812235521.ABCDC1C0157@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: extradoc
Changeset: r526:44c9384d8110
Date: 2014-08-13 01:55 +0200
http://bitbucket.org/pypy/pypy.org/changeset/44c9384d8110/
Log: update the values
diff --git a/don1.html b/don1.html
--- a/don1.html
+++ b/don1.html
@@ -15,7 +15,7 @@
- $52294 of $105000 (49.8%)
+ $52304 of $105000 (49.8%)
diff --git a/don4.html b/don4.html
--- a/don4.html
+++ b/don4.html
@@ -9,7 +9,7 @@
@@ -17,7 +17,7 @@
2nd call:
- $13478 of $80000 (16.8%)
+ $13914 of $80000 (17.4%)
From noreply at buildbot.pypy.org Wed Aug 13 02:17:03 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Wed, 13 Aug 2014 02:17:03 +0200 (CEST)
Subject: [pypy-commit] pypy py3k: fix another use of _mixin_
Message-ID: <20140813001703.79A2E1C0547@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3k
Changeset: r72783:9e37694ae6d3
Date: 2014-08-12 17:16 -0700
http://bitbucket.org/pypy/pypy/changeset/9e37694ae6d3/
Log: fix another use of _mixin_
diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py
--- a/pypy/module/cpyext/buffer.py
+++ b/pypy/module/cpyext/buffer.py
@@ -1,5 +1,6 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import buffer
+from rpython.rlib.objectmodel import import_from_mixin
from pypy.module.cpyext.api import (
cpython_api, CANNOT_FAIL, Py_buffer)
from pypy.module.cpyext.pyobject import PyObject, Py_DecRef
@@ -13,7 +14,6 @@
return 1
class CBufferMixin(object):
- _mixin_ = True
def __init__(self, space, c_buf, c_len, w_obj):
self.space = space
@@ -35,7 +35,8 @@
return rffi.charpsize2str(rffi.cast(rffi.CCHARP, self.c_buf),
self.c_len)
-class CBuffer(CBufferMixin, buffer.Buffer):
+class CBuffer(buffer.Buffer):
+ import_from_mixin(CBufferMixin)
_immutable_ = True
def __del__(self):
From noreply at buildbot.pypy.org Wed Aug 13 02:17:04 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Wed, 13 Aug 2014 02:17:04 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: merge py3k
Message-ID: <20140813001704.C59A91C0547@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72784:d121572ebfcd
Date: 2014-08-12 17:16 -0700
http://bitbucket.org/pypy/pypy/changeset/d121572ebfcd/
Log: merge py3k
diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py
--- a/pypy/module/cpyext/buffer.py
+++ b/pypy/module/cpyext/buffer.py
@@ -1,5 +1,6 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import buffer
+from rpython.rlib.objectmodel import import_from_mixin
from pypy.module.cpyext.api import (
cpython_api, CANNOT_FAIL, Py_buffer)
from pypy.module.cpyext.pyobject import PyObject, Py_DecRef
@@ -13,7 +14,6 @@
return 1
class CBufferMixin(object):
- _mixin_ = True
def __init__(self, space, c_buf, c_len, w_obj):
self.space = space
@@ -35,7 +35,8 @@
return rffi.charpsize2str(rffi.cast(rffi.CCHARP, self.c_buf),
self.c_len)
-class CBuffer(CBufferMixin, buffer.Buffer):
+class CBuffer(buffer.Buffer):
+ import_from_mixin(CBufferMixin)
_immutable_ = True
def __del__(self):
From noreply at buildbot.pypy.org Wed Aug 13 18:11:33 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Wed, 13 Aug 2014 18:11:33 +0200 (CEST)
Subject: [pypy-commit] stmgc default: fix a race between doing shadow stack
snapshots and a concurrently running major
Message-ID: <20140813161133.E9C1D1C03AC@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r1311:b7f132d1afba
Date: 2014-08-13 18:12 +0200
http://bitbucket.org/pypy/stmgc/changeset/b7f132d1afba/
Log: fix a race between doing shadow stack snapshots and a concurrently
running major collection. Also add more comments
diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c
--- a/c7/demo/demo_random2.c
+++ b/c7/demo/demo_random2.c
@@ -16,7 +16,7 @@
#define FORKS 3
#define ACTIVE_ROOTS_SET_SIZE 100 // max num of roots created/alive in one transaction
-
+#define MAX_ROOTS_ON_SS 1000 // max on shadow stack
// SUPPORT
struct node_s;
@@ -127,12 +127,17 @@
{
int i;
long to_push = td.active_roots_num;
+ long not_pushed = 0;
for (i = to_push - 1; i >= 0; i--) {
- STM_PUSH_ROOT(stm_thread_local, td.active_roots_set[i]);
- td.roots_on_ss++;
td.active_roots_num--;
+ if (td.roots_on_ss < MAX_ROOTS_ON_SS) {
+ STM_PUSH_ROOT(stm_thread_local, td.active_roots_set[i]);
+ td.roots_on_ss++;
+ } else {
+ not_pushed++;
+ }
}
- return to_push;
+ return to_push - not_pushed;
}
void add_root(objptr_t r);
@@ -206,6 +211,7 @@
objptr_t simple_events(objptr_t p, objptr_t _r)
{
int k = get_rand(10);
+ long pushed;
switch (k) {
case 0: // remove a root
@@ -221,8 +227,7 @@
p = _r;
break;
case 3: // allocate fresh 'p'
- ;
- long pushed = push_roots();
+ pushed = push_roots();
size_t sizes[4] = {sizeof(struct node_s),
sizeof(struct node_s) + (get_rand(100000) & ~15),
sizeof(struct node_s) + 4096,
@@ -281,7 +286,6 @@
return p;
}
-
void frame_loop();
objptr_t do_step(objptr_t p)
{
@@ -306,28 +310,28 @@
td.roots_on_ss = td.roots_on_ss_at_tr_start;
td.active_roots_num = 0;
pop_roots(pushed);
- return NULL;
+ p = NULL;
} else if (get_rand(10) == 1) {
long pushed = push_roots();
/* leaving our frame */
frame_loop();
/* back in our frame */
pop_roots(pushed);
- return NULL;
+ p = NULL;
} else if (get_rand(20) == 1) {
long pushed = push_roots();
stm_become_inevitable(&stm_thread_local, "please");
assert(stm_is_inevitable());
pop_roots(pushed);
- return NULL;
+ p= NULL;
} else if (get_rand(20) == 1) {
- return (objptr_t)-1; // possibly fork
+ p = (objptr_t)-1; // possibly fork
} else if (get_rand(20) == 1) {
long pushed = push_roots();
stm_become_globally_unique_transaction(&stm_thread_local, "really");
fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num);
pop_roots(pushed);
- return NULL;
+ p = NULL;
}
return p;
}
@@ -338,7 +342,9 @@
rewind_jmp_buf rjbuf;
stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
- volatile long roots_on_ss = td.roots_on_ss;
+ //fprintf(stderr,"%p F: %p\n", STM_SEGMENT->running_thread, __builtin_frame_address(0));
+
+ long roots_on_ss = td.roots_on_ss;
/* "interpreter main loop": this is one "application-frame" */
while (td.steps_left-->0 && get_rand(10) != 0) {
if (td.steps_left % 8 == 0)
@@ -348,6 +354,7 @@
p = do_step(p);
+
if (p == (objptr_t)-1) {
p = NULL;
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -997,6 +997,9 @@
/* NB. careful, this function might be called more than once to
abort a given segment. Make sure that
stm_rewind_jmp_restore_shadowstack() is idempotent. */
+ /* we need to do this here and not directly in rewind_longjmp() because
+ that is called when we already released everything (safe point)
+ and a concurrent major GC could mess things up. */
stm_rewind_jmp_restore_shadowstack(tl);
assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction);
#endif
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -4,6 +4,12 @@
#include
#include
+#ifndef _STM_CORE_H_
+long _has_mutex() {return 1;}
+void s_mutex_lock() {}
+void s_mutex_unlock() {}
+#endif
+
struct _rewind_jmp_moved_s {
struct _rewind_jmp_moved_s *next;
@@ -23,26 +29,39 @@
static void copy_stack(rewind_jmp_thread *rjthread, char *base, void *ssbase)
{
- /* Copy away part of the stack and shadowstack.
+ /* Copy away part of the stack and shadowstack. Sets moved_off_base to
+ the current frame_base.
+
The stack is copied between 'base' (lower limit, i.e. newest bytes)
and 'rjthread->head->frame_base' (upper limit, i.e. oldest bytes).
The shadowstack is copied between 'ssbase' (upper limit, newest)
and 'rjthread->head->shadowstack_base' (lower limit, oldest).
*/
+ struct _rewind_jmp_moved_s *next;
+ char *stop;
+ void *ssstop;
+ size_t stack_size, ssstack_size;
+
+ assert(_has_mutex());
+
assert(rjthread->head != NULL);
- char *stop = rjthread->head->frame_base;
+ stop = rjthread->head->frame_base;
+ ssstop = rjthread->head->shadowstack_base;
assert(stop >= base);
- void *ssstop = rjthread->head->shadowstack_base;
assert(ssstop <= ssbase);
- struct _rewind_jmp_moved_s *next = (struct _rewind_jmp_moved_s *)
- rj_malloc(RJM_HEADER + (stop - base) + (ssbase - ssstop));
+ stack_size = stop - base;
+ ssstack_size = ssbase - ssstop;
+
+ next = (struct _rewind_jmp_moved_s *)
+ rj_malloc(RJM_HEADER + stack_size + ssstack_size);
assert(next != NULL); /* XXX out of memory */
next->next = rjthread->moved_off;
- next->stack_size = stop - base;
- next->shadowstack_size = ssbase - ssstop;
- memcpy(((char *)next) + RJM_HEADER, base, stop - base);
- memcpy(((char *)next) + RJM_HEADER + (stop - base), ssstop,
- ssbase - ssstop);
+ next->stack_size = stack_size;
+ next->shadowstack_size = ssstack_size;
+
+ memcpy(((char *)next) + RJM_HEADER, base, stack_size);
+ memcpy(((char *)next) + RJM_HEADER + stack_size, ssstop,
+ ssstack_size);
rjthread->moved_off_base = stop;
rjthread->moved_off_ssbase = ssstop;
@@ -52,7 +71,12 @@
__attribute__((noinline))
long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss)
{
+ /* saves the current stack frame to the list of slices and
+ calls setjmp(). It returns the number of times a longjmp()
+ jumped back to this setjmp() */
if (rjthread->moved_off) {
+ /* old stack slices are not needed anymore (next longjmp()
+ will restore only to this setjmp()) */
_rewind_jmp_free_stack_slices(rjthread);
}
/* all locals of this function that need to be saved and restored
@@ -72,22 +96,36 @@
result = rjthread->repeat_count + 1;
}
rjthread->repeat_count = result;
+
+ /* snapshot of top frame: needed every time because longjmp() frees
+ the previous one. Need to have mutex locked otherwise a concurrent
+ GC may get garbage while saving shadow stack */
+ s_mutex_lock();
copy_stack(rjthread, (char *)&saved, saved.ss1);
+ s_mutex_unlock();
+
return result;
}
__attribute__((noinline, noreturn))
static void do_longjmp(rewind_jmp_thread *rjthread, char *stack_free)
{
+ /* go through list of copied stack-slices and copy them back to the
+ current stack, expanding it if necessary. The shadowstack should
+ already be restored at this point (restore_shadowstack()) */
assert(rjthread->moved_off_base != NULL);
+ s_mutex_lock();
while (rjthread->moved_off) {
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
char *target = rjthread->moved_off_base;
+ /* CPU stack grows downwards: */
target -= p->stack_size;
if (target < stack_free) {
/* need more stack space! */
+ s_mutex_unlock();
do_longjmp(rjthread, alloca(stack_free - target));
+ abort(); /* unreachable */
}
memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size);
@@ -95,6 +133,8 @@
rjthread->moved_off = p->next;
rj_free(p);
}
+
+ s_mutex_unlock();
__builtin_longjmp(rjthread->jmpbuf, 1);
}
@@ -109,19 +149,25 @@
char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread,
void *callback(void *, const void *, size_t))
{
+ /* enumerate all saved shadow-stack slices */
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
char *sstarget = rjthread->moved_off_ssbase;
+ assert(_has_mutex());
+
while (p) {
- char *ssend = sstarget + p->shadowstack_size;
- callback(sstarget, ((char *)p) + RJM_HEADER + p->stack_size,
- p->shadowstack_size);
- sstarget = ssend;
+ if (p->shadowstack_size) {
+ void *ss_slice = ((char *)p) + RJM_HEADER + p->stack_size;
+ callback(sstarget, ss_slice, p->shadowstack_size);
+
+ sstarget += p->shadowstack_size;
+ }
p = p->next;
}
return sstarget;
}
+
char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread)
{
return rewind_jmp_enum_shadowstack(rjthread, memcpy);
@@ -130,16 +176,23 @@
__attribute__((noinline))
void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread)
{
+ /* called when leaving a frame. copies the now-current frame
+ to the list of stack-slices */
+ s_mutex_lock();
if (rjthread->head == NULL) {
_rewind_jmp_free_stack_slices(rjthread);
+ s_mutex_unlock();
return;
}
assert(rjthread->moved_off_base < (char *)rjthread->head);
copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase);
+ s_mutex_unlock();
}
void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread)
{
+ /* frees all saved stack copies */
+ assert(_has_mutex());
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
struct _rewind_jmp_moved_s *pnext;
while (p) {
diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h
--- a/c7/stm/rewind_setjmp.h
+++ b/c7/stm/rewind_setjmp.h
@@ -1,9 +1,20 @@
#ifndef _REWIND_SETJMP_H_
#define _REWIND_SETJMP_H_
+
#include <stddef.h>
/************************************************************
+There is a singly-linked list of frames in each thread
+rjthread->head->prev->prev->prev
+
+Another singly-linked list is the list of copied stack-slices.
+When doing a setjmp(), we copy the top-frame, free all old
+stack-slices, and link it to the top-frame->moved_off.
+When returning from the top-frame while moved_off still points
+to a slice, we also need to copy the top-frame->prev frame/slice
+and add it to this list (pointed to by moved_off).
+--------------------------------------------------------------
: : ^^^^^
|-------------------| older frames in the stack
@@ -58,6 +69,7 @@
} rewind_jmp_thread;
+/* remember the current stack and ss_stack positions */
#define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \
(rjbuf)->frame_base = __builtin_frame_address(0); \
(rjbuf)->shadowstack_base = (char *)(ss); \
@@ -65,6 +77,8 @@
(rjthread)->head = (rjbuf); \
} while (0)
+/* go up one frame. if there was a setjmp call in this frame,
+ */
#define rewind_jmp_leaveframe(rjthread, rjbuf, ss) do { \
assert((rjbuf)->shadowstack_base == (char *)(ss)); \
(rjthread)->head = (rjbuf)->prev; \
From noreply at buildbot.pypy.org Thu Aug 14 08:17:53 2014
From: noreply at buildbot.pypy.org (waedt)
Date: Thu, 14 Aug 2014 08:17:53 +0200 (CEST)
Subject: [pypy-commit] pypy utf8-unicode2: Using byte-strings 8f5d79d24198
	causes weird rtyper errors. Try something else.
Message-ID: <20140814061753.525321C0323@cobra.cs.uni-duesseldorf.de>
Author: Tyler Wade
Branch: utf8-unicode2
Changeset: r72785:0016d703c625
Date: 2014-08-13 02:28 -0500
http://bitbucket.org/pypy/pypy/changeset/0016d703c625/
Log:	Using byte-strings 8f5d79d24198 causes weird rtyper errors. Try
something else.
diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py
--- a/pypy/module/_io/interp_stringio.py
+++ b/pypy/module/_io/interp_stringio.py
@@ -27,23 +27,24 @@
newline = None
else:
newline = space.unicode_w(w_newline)
- newline = newline.bytes
- if (newline and newline != '\n' and newline != '\r\n' and
- newline != '\r'):
- # Not using oefmt() because I don't know how to ues it
+ if (newline is not None and len(newline) and
+ not (utf8.EQ(newline, Utf8Str('\n')) or
+ utf8.EQ(newline, Utf8Str('\r\n')) or
+ utf8.EQ(newline, Utf8Str('\r')))):
+ # Not using oefmt() because I don't know how to use it
# with unicode
raise OperationError(space.w_ValueError,
space.mod(
space.wrap("illegal newline value: %s"), space.wrap(newline)
)
)
-
if newline is not None:
self.readnl = newline
- self.readuniversal = not newline
+ self.readuniversal = newline is None or not len(newline)
self.readtranslate = newline is None
- if newline and newline[0] == '\r':
+ if (newline is not None and len(newline) and
+ utf8ord(newline) == ord("\r")):
self.writenl = newline
if self.readuniversal:
self.w_decoder = space.call_function(
@@ -143,10 +144,9 @@
else:
w_decoded = w_obj
- if self.writenl:
+ if self.writenl is not None and len(self.writenl):
w_decoded = space.call_method(
- w_decoded, "replace", space.wrap("\n"),
- space.wrap(Utf8Str(self.writenl))
+ w_decoded, "replace", space.wrap("\n"), space.wrap(self.writenl)
)
string = space.unicode_w(w_decoded)
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -372,28 +372,28 @@
newline = None
else:
newline = space.unicode_w(w_newline)
- # newline is guaranteed to be either empty or ascii
- newline = newline.bytes
- if (newline and newline != '\n' and newline != '\r\n' and
- newline != '\r'):
+ if (newline is not None and len(newline) and
+ not (utf8.EQ(newline, Utf8Str('\n')) or
+ utf8.EQ(newline, Utf8Str('\r\n')) or
+ utf8.EQ(newline, Utf8Str('\r')))):
r = space.str_w(space.repr(w_newline))
raise OperationError(space.w_ValueError, space.wrap(
"illegal newline value: %s" % (r,)))
self.line_buffering = line_buffering
- self.readuniversal = not newline
+ self.readuniversal = newline is None or not len(newline)
self.readtranslate = newline is None
self.readnl = newline
- self.writetranslate = (newline is not None and newline != '')
+ self.writetranslate = (newline is not None and len(newline))
if not self.readuniversal:
self.writenl = self.readnl
- if self.writenl == '\n':
+ if utf8.EQ(self.writenl, Utf8Str('\n')):
self.writenl = None
elif _WINDOWS:
- self.writenl = "\r\n"
+ self.writenl = Utf8Str("\r\n")
else:
self.writenl = None
@@ -663,7 +663,7 @@
start = endpos = offset_to_buffer = 0
break
- if not remaining:
+ if remaining is None or not len(remaining):
line = self.decoded_chars
start = self.decoded_chars_used
offset_to_buffer = 0
@@ -705,22 +705,22 @@
# We have consumed the buffer
self._set_decoded_chars(None)
- if line:
+ if line is not None and len(line):
# Our line ends in the current buffer
decoded_chars_used = endpos - offset_to_buffer
assert decoded_chars_used >= 0
self.decoded_chars_used = decoded_chars_used
if start > 0 or endpos < len(line):
line = line[start:endpos]
- if remaining:
+ if remaining is not None and len(remaining):
chunks.append(remaining)
remaining = None
if chunks:
- if line:
+ if line is not None and len(line):
chunks.append(line)
line = Utf8Str('').join(chunks)
- if line:
+ if line is not None and len(line):
return space.wrap(line)
else:
return space.wrap(Utf8Str(''))
@@ -743,10 +743,12 @@
textlen = len(text)
haslf = False
- if (self.writetranslate and self.writenl) or self.line_buffering:
+ if (self.writetranslate and self.writenl is not None and
+ len(self.writenl)) or self.line_buffering:
if text.find('\n') >= 0:
haslf = True
- if haslf and self.writetranslate and self.writenl:
+ if (haslf and self.writetranslate and
+ self.writenl is not None and len(self.writenl)):
w_text = space.call_method(w_text, "replace",
space.wrap(Utf8Str('\n')),
space.wrap(self.writenl))
From noreply at buildbot.pypy.org Thu Aug 14 08:17:54 2014
From: noreply at buildbot.pypy.org (waedt)
Date: Thu, 14 Aug 2014 08:17:54 +0200 (CEST)
Subject: [pypy-commit] pypy utf8-unicode2: More untranslated vs translated
bools
Message-ID: <20140814061754.8B00B1C0323@cobra.cs.uni-duesseldorf.de>
Author: Tyler Wade
Branch: utf8-unicode2
Changeset: r72786:c5c9087c291c
Date: 2014-08-13 02:29 -0500
http://bitbucket.org/pypy/pypy/changeset/c5c9087c291c/
Log: More untranslated vs translated bools
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -462,7 +462,7 @@
self._precision = -1
spec = self.spec
- if not spec:
+ if (spec is None or not len(spec)):
return True
length = len(spec)
@@ -656,7 +656,7 @@
if self._fill_char == ord("0") and self._align == ord("="):
spec.n_min_width = self._width - extra_length
- if self._loc_thousands:
+ if self._loc_thousands is not None and len(self._loc_thousands):
self._group_digits(spec, digits[to_number:])
n_grouped_digits = len(self._grouped_digits)
else:
@@ -774,7 +774,7 @@
out.append_multiple_char(chr(fill_char), spec.n_spadding)
if spec.n_digits != 0:
- if self._loc_thousands:
+ if self._loc_thousands is not None and len(self._loc_thousands):
if grouped_digits is not None:
digits = grouped_digits
else:
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py
--- a/pypy/objspace/std/stringmethods.py
+++ b/pypy/objspace/std/stringmethods.py
@@ -447,7 +447,7 @@
sb = self._builder(prealloc_size)
for i in range(size):
- if value and i != 0:
+ if (value is not None and len(value)) and i != 0:
sb.append(value)
sb.append(unwrapped[i])
return self._new(sb.build())
From noreply at buildbot.pypy.org Thu Aug 14 08:17:55 2014
From: noreply at buildbot.pypy.org (waedt)
Date: Thu, 14 Aug 2014 08:17:55 +0200 (CEST)
Subject: [pypy-commit] pypy utf8-unicode2: Simplify iterators. Use iterators
consistently when encoding unicode strings
Message-ID: <20140814061755.CDCC21C0323@cobra.cs.uni-duesseldorf.de>
Author: Tyler Wade
Branch: utf8-unicode2
Changeset: r72787:9f7fc269657f
Date: 2014-08-14 01:02 -0500
http://bitbucket.org/pypy/pypy/changeset/9f7fc269657f/
Log: Simplify iterators. Use iterators consistently when encoding unicode
strings
diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py
--- a/pypy/interpreter/test/test_utf8.py
+++ b/pypy/interpreter/test/test_utf8.py
@@ -2,8 +2,7 @@
import py
import sys
-from pypy.interpreter.utf8 import (
- Utf8Str, Utf8Builder, utf8chr, utf8ord)
+from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, utf8chr, utf8ord
from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.test.test_llinterp import interpret
@@ -29,24 +28,8 @@
def test_iterator():
s = build_utf8str()
iter = s.codepoint_iter()
- assert iter.peek_next() == 0x41
assert list(iter) == [0x41, 0x10F, 0x20AC, 0x1F63D]
- for i in range(1, 5):
- iter = s.codepoint_iter()
- iter.move(i)
- if i != 4:
- assert iter.peek_next() == [0x41, 0x10F, 0x20AC, 0x1F63D][i]
- l = list(iter)
- assert l == [0x41, 0x10F, 0x20AC, 0x1F63D][i:]
-
- for i in range(1, 5):
- iter = s.codepoint_iter()
- list(iter) # move the iterator to the end
- iter.move(-i)
- l = list(iter)
- assert l == [0x41, 0x10F, 0x20AC, 0x1F63D][4-i:]
-
iter = s.char_iter()
l = [s.bytes.decode('utf8') for s in list(iter)]
if sys.maxunicode < 65536:
@@ -54,26 +37,17 @@
else:
assert l == [u'A', u'\u010F', u'\u20AC', u'\U0001F63D']
-def test_reverse_iterator():
+def test_new_iterator():
s = build_utf8str()
- iter = s.reverse_codepoint_iter()
- assert iter.peek_next() == 0x1F63D
- assert list(iter) == [0x1F63D, 0x20AC, 0x10F, 0x41]
+ i = s.iter()
+ while not i.finished():
+ assert utf8ord(s, i.pos()) == i.current()
+ i.move(1)
- for i in range(1, 5):
- iter = s.reverse_codepoint_iter()
- iter.move(i)
- if i != 4:
- assert iter.peek_next() == [0x1F63D, 0x20AC, 0x10F, 0x41][i]
- l = list(iter)
- assert l == [0x1F63D, 0x20AC, 0x10F, 0x41][i:]
-
- for i in range(1, 5):
- iter = s.reverse_codepoint_iter()
- list(iter) # move the iterator to the end
- iter.move(-i)
- l = list(iter)
- assert l == [0x1F63D, 0x20AC, 0x10F, 0x41][4-i:]
+ i = s.iter(len(s) - 1)
+ while i.pos() >= 0:
+ assert utf8ord(s, i.pos()) == i.current()
+ i.move(-1)
def test_builder_append_slice():
builder = Utf8Builder()
@@ -146,7 +120,6 @@
s = Utf8Str(' ')
assert s.join([]) == u''
-
assert s.join([Utf8Str('one')]) == u'one'
assert s.join([Utf8Str('one'), Utf8Str('two')]) == u'one two'
diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py
--- a/pypy/interpreter/utf8.py
+++ b/pypy/interpreter/utf8.py
@@ -309,18 +309,15 @@
def __unicode__(self):
return unicode(self.bytes, 'utf8')
+ def iter(self, start=0):
+ return Utf8Iterator(self, start)
+
def char_iter(self):
return Utf8CharacterIter(self)
- def reverse_char_iter(self):
- return Utf8ReverseCharacterIter(self)
-
def codepoint_iter(self):
return Utf8CodePointIter(self)
- def reverse_codepoint_iter(self):
- return Utf8ReverseCodePointIter(self)
-
@specialize.argtype(1, 2)
def _bound_check(self, start, end):
if start is None:
@@ -432,7 +429,7 @@
else:
break
- start_byte = iter.byte_pos
+ start_byte = iter._byte_pos
assert start_byte >= 0
if maxsplit == 0:
@@ -449,7 +446,7 @@
self._is_ascii))
break
- end = iter.byte_pos
+ end = iter._byte_pos
assert end >= 0
res.append(Utf8Str(self.bytes[start_byte:end], self._is_ascii))
maxsplit -= 1
@@ -466,32 +463,32 @@
other_bytes = other.bytes
return [Utf8Str(s) for s in self.bytes.rsplit(other_bytes, maxsplit)]
+ if len(self) == 0:
+ return []
+
res = []
- iter = self.reverse_codepoint_iter()
+ iter = self.iter(len(self) - 1)
while True:
# Find the start of the next word
- for cd in iter:
- if not unicodedb.isspace(cd):
- break
- else:
+ while iter.pos() >= 0 and unicodedb.isspace(iter.current()):
+ iter.move(-1)
+ if iter.pos() < 0:
break
- start_byte = self.next_char(iter.byte_pos)
-
+ start_byte = self.next_char(iter.byte_pos())
if maxsplit == 0:
res.append(Utf8Str(self.bytes[0:start_byte], self._is_ascii))
break
# Find the end of the word
- for cd in iter:
- if unicodedb.isspace(cd):
- break
- else:
+ while iter.pos() >= 0 and not unicodedb.isspace(iter.current()):
+ iter.move(-1)
+ if iter.pos() < 0:
# We hit the end of the string
res.append(Utf8Str(self.bytes[0:start_byte], self._is_ascii))
break
- end_byte = self.next_char(iter.byte_pos)
+ end_byte = self.next_char(iter.byte_pos())
res.append(Utf8Str(self.bytes[end_byte:start_byte],
self._is_ascii))
maxsplit -= 1
@@ -756,117 +753,27 @@
# _______________________________________________
-# iter.current is the current (ie the last returned) element
-# iter.pos isthe position of the current element
-# iter.byte_pos isthe byte position of the current element
-# In the before-the-start state, for foward iterators iter.pos and
-# iter.byte_pos are -1. For reverse iterators, they are len(ustr) and
-# len(ustr.bytes) respectively.
-
class ForwardIterBase(object):
def __init__(self, ustr):
- self.ustr = ustr
- self.pos = -1
-
- self._byte_pos = 0
- self.byte_pos = -1
- self.current = self._default
+ self._str = ustr
+ self._byte_pos = -1
def __iter__(self):
return self
def next(self):
- if self.pos + 1 == len(self.ustr):
+ if self._byte_pos == -1:
+ if len(self._str) == 0:
+ raise StopIteration()
+ self._byte_pos = 0
+ return self._value(0)
+
+ self._byte_pos = self._str.next_char(self._byte_pos)
+ if self._byte_pos == len(self._str.bytes):
raise StopIteration()
- self.pos += 1
- self.byte_pos = self._byte_pos
-
- self.current = self._value(self.byte_pos)
-
- self._byte_pos = self.ustr.next_char(self._byte_pos)
- return self.current
-
- def peek_next(self):
return self._value(self._byte_pos)
- def peek_prev(self):
- return self._value(self._move_backward(self.byte_pos))
-
- def move(self, count):
- if count > 0:
- self.pos += count
-
- while count != 1:
- self._byte_pos = self.ustr.next_char(self._byte_pos)
- count -= 1
- self.byte_pos = self._byte_pos
- self._byte_pos = self.ustr.next_char(self._byte_pos)
- self.current = self._value(self.byte_pos)
-
- elif count < 0:
- self.pos += count
- while count < -1:
- self.byte_pos = self.ustr.prev_char(self.byte_pos)
- count += 1
- self._byte_pos = self.byte_pos
- self.byte_pos = self.ustr.prev_char(self.byte_pos)
- self.current = self._value(self.byte_pos)
-
- def copy(self):
- iter = self.__class__(self.ustr)
- iter.pos = self.pos
- iter.byte_pos = self.byte_pos
- iter._byte_pos = self._byte_pos
- iter.current = self.current
- return iter
-
-class ReverseIterBase(object):
- def __init__(self, ustr):
- self.ustr = ustr
- self.pos = len(ustr)
- self.byte_pos = len(ustr.bytes)
- self.current = self._default
-
- def __iter__(self):
- return self
-
- def next(self):
- if self.pos == 0:
- raise StopIteration()
-
- self.pos -= 1
- self.byte_pos = self.ustr.prev_char(self.byte_pos)
- self.current = self._value(self.byte_pos)
- return self.current
-
- def peek_next(self):
- return self._value(self.ustr.prev_char(self.byte_pos))
-
- def peek_prev(self):
- return self._value(self.ustr.next_char(self.byte_pos))
-
- def move(self, count):
- if count > 0:
- self.pos -= count
- while count != 0:
- self.byte_pos = self.ustr.prev_char(self.byte_pos)
- count -= 1
- self.current = self._value(self.byte_pos)
- elif count < 0:
- self.pos -= count
- while count != 0:
- self.byte_pos = self.ustr.next_char(self.byte_pos)
- count += 1
- self.current = self._value(self.byte_pos)
-
- def copy(self):
- iter = self.__class__(self.ustr)
- iter.pos = self.pos
- iter.byte_pos = self.byte_pos
- iter.current = self.current
- return iter
-
def make_iterator(name, base, calc_value, default):
class C(object):
import_from_mixin(base, ['__init__', '__iter__'])
@@ -876,32 +783,91 @@
return C
def codepoint_calc_value(self, byte_pos):
- if byte_pos == -1 or byte_pos == len(self.ustr.bytes):
+ if byte_pos == -1 or byte_pos == len(self._str.bytes):
return -1
- return utf8ord_bytes(self.ustr.bytes, byte_pos)
+ return utf8ord_bytes(self._str.bytes, byte_pos)
def character_calc_value(self, byte_pos):
- if byte_pos == -1 or byte_pos == len(self.ustr.bytes):
+ if byte_pos == -1 or byte_pos == len(self._str.bytes):
return None
- length = utf8_code_length[ord(self.ustr.bytes[self.byte_pos])]
- return Utf8Str(''.join([self.ustr.bytes[i]
- for i in range(self.byte_pos, self.byte_pos + length)]),
+ length = utf8_code_length[ord(self._str.bytes[self._byte_pos])]
+ return Utf8Str(''.join([self._str.bytes[i]
+ for i in range(self._byte_pos, self._byte_pos + length)]),
length == 1)
Utf8CodePointIter = make_iterator("Utf8CodePointIter", ForwardIterBase,
codepoint_calc_value, -1)
Utf8CharacterIter = make_iterator("Utf8CharacterIter", ForwardIterBase,
character_calc_value, None)
-Utf8ReverseCodePointIter = make_iterator(
- "Utf8ReverseCodePointIter", ReverseIterBase, codepoint_calc_value, -1)
-Utf8ReverseCharacterIter = make_iterator(
- "Utf8ReverseCharacterIter", ReverseIterBase, character_calc_value, None)
del make_iterator
del codepoint_calc_value
del character_calc_value
del ForwardIterBase
-del ReverseIterBase
+# _______________________________________________
+
+class Utf8Iterator(object):
+ def __init__(self, str, start=0):
+ self._str = str
+
+ self._pos = start
+ self._byte_pos = str.index_of_char(start)
+
+ self._calc_current()
+
+ def _calc_current(self):
+ if self._pos >= len(self._str) or self._pos < 0:
+ raise IndexError()
+ else:
+ self._current = utf8ord_bytes(self._str.bytes, self._byte_pos)
+
+ def current(self):
+ if self._current == -1:
+ self._calc_current()
+ return self._current
+
+ def pos(self):
+ return self._pos
+
+ def byte_pos(self):
+ return self._byte_pos
+
+ def move(self, count):
+ # TODO: As an optimization, we could delay moving byte_pos until we
+ # _calc_current
+ if count > 0:
+ self._pos += count
+
+ if self._pos < 0:
+ self._byte_pos = 0
+ else:
+ while count != 0:
+ self._byte_pos = self._str.next_char(self._byte_pos)
+ count -= 1
+ self._current = -1
+
+ elif count < 0:
+ self._pos += count
+
+ if self._pos < 0:
+ self._byte_pos = 0
+ else:
+ while count < 0:
+ self._byte_pos = self._str.prev_char(self._byte_pos)
+ count += 1
+ self._current = -1
+
+ def finished(self):
+ return self._pos >= len(self._str)
+
+ def copy(self):
+ i = Utf8Iterator(self._str)
+ i._pos = self._pos
+ i._byte_pos = self._byte_pos
+ i._current = self._current
+ return i
+
+
diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py
--- a/pypy/interpreter/utf8_codecs.py
+++ b/pypy/interpreter/utf8_codecs.py
@@ -327,15 +327,15 @@
if size == 0:
return ''
result = StringBuilder(size)
- pos = 0
- while pos < size:
- oc = utf8ord(s, pos)
-
+ iter = s.iter()
+ while not iter.finished():
+ oc = iter.current()
if oc < 0x100:
result.append(chr(oc))
else:
raw_unicode_escape_helper(result, oc)
- pos += 1
+
+ iter.move(1)
return result.build()
@@ -397,28 +397,29 @@
if size == 0:
return ''
result = StringBuilder(size)
- pos = 0
- while pos < size:
- od = utf8ord(p, pos)
+ iter = p.iter()
+ while not iter.finished():
+ od = iter.current()
if od < limit:
result.append(chr(od))
- pos += 1
+ iter.move(1)
else:
- # startpos for collecting unencodable chars
- collstart = pos
- collend = pos+1
- while collend < len(p) and utf8ord(p, collend) >= limit:
- collend += 1
+ coll = iter.copy()
+ while not coll.finished() and coll.current() >= limit:
+ coll.move(1)
+ collstart = iter.pos()
+ collend = coll.pos()
+
ru, rs, pos = errorhandler(errors, encoding, reason, p,
collstart, collend)
+ iter.move(pos - iter.pos())
if rs is not None:
# py3k only
result.append(rs)
continue
- for ch in ru:
- cd = utf8ord(ch, 0)
+ for cd in ru.codepoint_iter():
if cd < limit:
result.append(chr(cd))
else:
@@ -452,41 +453,48 @@
allow_surrogates)
def unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates):
- iter = s.codepoint_iter()
- for oc in iter:
+ iter = s.iter()
+
+ while not iter.finished():
+ oc = iter.current()
if oc >= 0xD800 and oc <= 0xDFFF:
break
- if iter.pos == size:
- return s.bytes
- else:
+ iter.move(1)
+ if iter.finished():
return s.bytes
result = Utf8Builder(len(s.bytes))
- result.append_slice(s.bytes, 0, iter.byte_pos)
+ result.append_slice(s.bytes, 0, iter.byte_pos())
- iter.move(-1)
- for oc in iter:
+ while not iter.finished():
+ oc = iter.current()
+ iter.move(1)
+
if oc >= 0xD800 and oc <= 0xDFFF:
# Check the next character to see if this is a surrogate pair
- if (iter.pos != len(s) and oc <= 0xDBFF and
- 0xDC00 <= iter.peek_next() <= 0xDFFF):
- oc2 = iter.next()
+ if (not iter.finished() and oc <= 0xDBFF and
+ 0xDC00 <= iter.current() <= 0xDFFF):
+
+ oc2 = iter.current()
result.append_codepoint(
((oc - 0xD800) << 10 | (oc2 - 0xDC00)) + 0x10000)
+ iter.move(1)
+
elif allow_surrogates:
result.append_codepoint(oc)
else:
ru, rs, pos = errorhandler(errors, 'utf8',
'surrogates not allowed', s,
- iter.pos-1, iter.pos)
- iter.move(pos - iter.pos)
+ iter.pos()-2, iter.pos()-1)
+ iter.move(pos - iter.pos())
if rs is not None:
# py3k only
result.append_utf8(rs)
+ iter.move(1)
continue
- for ch in ru:
- if ord(ch) < 0x80:
- result.append_ascii(ch)
+ for ch in ru.codepoint_iter():
+ if ch < 0x80:
+ result.append_ascii(chr(ch))
else:
errorhandler('strict', 'utf8',
'surrogates not allowed',
@@ -809,10 +817,10 @@
_STORECHAR(result, 0xFEFF, BYTEORDER)
byteorder = BYTEORDER
- i = 0
- while i < size:
- ch = utf8ord(s, i)
- i += 1
+ iter = s.iter()
+ while not iter.finished():
+ ch = iter.current()
+ iter.move(1)
ch, ch2 = create_surrogate_pair(ch)
_STORECHAR(result, ch, byteorder)
@@ -980,16 +988,16 @@
_STORECHAR32(result, 0xFEFF, BYTEORDER)
byteorder = BYTEORDER
- i = 0
- while i < size:
- ch = utf8ord(s, i)
- i += 1
+ iter = s.iter()
+ while not iter.finished():
+ ch = iter.current()
+ iter.move(1)
ch2 = 0
if MAXUNICODE < 65536 and 0xD800 <= ch <= 0xDBFF and i < size:
- ch2 = ord(s[i])
+ ch2 = iter.current()
if 0xDC00 <= ch2 <= 0xDFFF:
ch = (((ch & 0x3FF)<<10) | (ch2 & 0x3FF)) + 0x10000;
- i += 1
+ iter.move(1)
_STORECHAR32(result, ch, byteorder)
return result.build()
@@ -1228,10 +1236,9 @@
base64bits = 0
base64buffer = 0
- # TODO: Looping like this is worse than O(n)
- pos = 0
- while pos < size:
- oc = utf8ord(s, pos)
+ iter = s.iter()
+ while not iter.finished():
+ oc = iter.current()
if not inShift:
if oc == ord('+'):
result.append('+-')
@@ -1260,7 +1267,7 @@
else:
base64bits, base64buffer = _utf7_ENCODE_CHAR(
result, oc, base64bits, base64buffer)
- pos += 1
+ iter.move(1)
if base64bits:
result.append(_utf7_TO_BASE64(base64buffer << (6 - base64bits)))
@@ -1318,15 +1325,17 @@
if size == 0:
return ''
result = StringBuilder(size)
- pos = 0
- while pos < size:
- ch = s[pos]
+
+ iter = s.iter()
+ while not iter.finished():
+ ch = utf8chr(iter.current())
c = mapping.get(ch, '')
if len(c) == 0:
ru, rs, pos = errorhandler(errors, "charmap",
                                      "character maps to <undefined>",
- s, pos, pos + 1)
+ s, iter.pos(), iter.pos() + 1)
+ iter.move(pos - iter.pos())
if rs is not None:
# py3k only
result.append(rs)
@@ -1337,11 +1346,11 @@
errorhandler(
"strict", "charmap",
                    "character maps to <undefined>",
- s, pos, pos + 1)
+ s, iter.pos(), iter.pos() + 1)
result.append(c2)
continue
result.append(c)
- pos += 1
+ iter.move(1)
return result.build()
# }}}
@@ -1367,9 +1376,9 @@
errorhandler = default_unicode_error_decode
if BYTEORDER == 'little':
- iorder = [0, 1, 2, 3]
+ iorder = (0, 1, 2, 3)
else:
- iorder = [3, 2, 1, 0]
+ iorder = (3, 2, 1, 0)
if size == 0:
return Utf8Str(''), 0
@@ -1542,30 +1551,35 @@
if size == 0:
return ''
result = StringBuilder(size)
- pos = 0
- while pos < size:
- ch = utf8ord(s, pos)
+
+ iter = s.iter()
+ while not iter.finished():
+ ch = iter.current()
+
if unicodedb.isspace(ch):
result.append(' ')
- pos += 1
+ iter.move(1)
continue
+
try:
decimal = unicodedb.decimal(ch)
except KeyError:
pass
else:
result.append(chr(48 + decimal))
- pos += 1
+ iter.move(1)
continue
+
if 0 < ch < 256:
result.append(chr(ch))
- pos += 1
+ iter.move(1)
continue
+
# All other characters are considered unencodable
- collstart = pos
- collend = collstart + 1
- while collend < size:
- ch = utf8ord(s, collend)
+ colliter = iter.copy()
+ colliter.move(1)
+ while not colliter.finished():
+ ch = colliter.current()
try:
if (0 < ch < 256 or
unicodedb.isspace(ch) or
@@ -1574,15 +1588,19 @@
except KeyError:
# not a decimal
pass
- collend += 1
+ colliter.move(1)
+
+ collstart = iter.pos()
+ collend = colliter.pos()
+
msg = "invalid decimal Unicode string"
ru, rs, pos = errorhandler(errors, 'decimal',
msg, s, collstart, collend)
+ iter.move(pos - iter.pos())
if rs is not None:
# py3k only
errorhandler('strict', 'decimal', msg, s, collstart, collend)
- for i in range(len(ru)):
- ch = utf8.ORD(ru, i)
+ for ch in ru.codepoint_iter():
if unicodedb.isspace(ch):
result.append(' ')
continue
From noreply at buildbot.pypy.org Thu Aug 14 10:35:33 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 10:35:33 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: * _is_pinned takes
'pinned_objects' into account.
Message-ID: <20140814083533.36EDB1C0323@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72788:28a9b76f8eef
Date: 2014-08-13 14:12 +0200
http://bitbucket.org/pypy/pypy/changeset/28a9b76f8eef/
Log: * _is_pinned takes 'pinned_objects' into account.
* removed 'we_are_translated' from pin()/unpin() as they are always
not translated (rgc.py version).
* added some comments for clarification
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -46,11 +46,8 @@
Note further that pinning an object does not prevent it from being
collected if it is not used anymore.
"""
- if we_are_translated():
- return False
- else:
- pinned_objects.append(obj)
- return True
+ pinned_objects.append(obj)
+ return True
class PinEntry(ExtRegistryEntry):
@@ -68,11 +65,7 @@
"""Unpin 'obj', allowing it to move again.
Must only be called after a call to pin(obj) returned True.
"""
- if we_are_translated():
- raise AssertionError("pin() always returns False, "
- "so unpin() should not be called")
- else:
- pinned_objects.remove(obj)
+ pinned_objects.remove(obj)
class UnpinEntry(ExtRegistryEntry):
@@ -87,7 +80,7 @@
def _is_pinned(obj):
"""Method to check if 'obj' is pinned."""
- return False
+ return obj in pinned_objects
class IsPinnedEntry(ExtRegistryEntry):
_about_ = _is_pinned
@@ -160,9 +153,12 @@
on objects that are already a bit old, so have a chance to be
already non-movable."""
if not we_are_translated():
- return p not in pinned_objects
+ # for testing purpose
+ return not _is_pinned(p)
#
if _is_pinned(p):
+ # although a pinned object can't move we must return 'False'. A pinned
+ # object can be unpinned any time and becomes movable.
return False
i = 0
while can_move(p):
From noreply at buildbot.pypy.org Thu Aug 14 10:35:34 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 10:35:34 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: renamed parameters for
consistency
Message-ID: <20140814083534.805601C0323@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72789:9a727746d96f
Date: 2014-08-13 15:21 +0200
http://bitbucket.org/pypy/pypy/changeset/9a727746d96f/
Log: renamed parameters for consistency
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -53,7 +53,7 @@
class PinEntry(ExtRegistryEntry):
_about_ = pin
- def compute_result_annotation(self, s_p):
+ def compute_result_annotation(self, s_obj):
from rpython.annotator import model as annmodel
return annmodel.SomeBool()
@@ -71,7 +71,7 @@
class UnpinEntry(ExtRegistryEntry):
_about_ = unpin
- def compute_result_annotation(self, s_p):
+ def compute_result_annotation(self, s_obj):
pass
def specialize_call(self, hop):
@@ -85,7 +85,7 @@
class IsPinnedEntry(ExtRegistryEntry):
_about_ = _is_pinned
- def compute_result_annotation(self, s_p):
+ def compute_result_annotation(self, s_obj):
from rpython.annotator import model as annmodel
return annmodel.SomeBool()
From noreply at buildbot.pypy.org Thu Aug 14 10:35:35 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 10:35:35 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: as discussed with fijal
(irc, 2014-08-13) should be correct.
Message-ID: <20140814083535.B01551C0323@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72790:7243202f2115
Date: 2014-08-13 16:54 +0200
http://bitbucket.org/pypy/pypy/changeset/7243202f2115/
Log: as discussed with fijal (irc, 2014-08-13) should be correct.
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -480,9 +480,9 @@
'gc_writebarrier': LLOp(canrun=True),
'gc_writebarrier_before_copy': LLOp(canrun=True),
'gc_heap_stats' : LLOp(canmallocgc=True),
- 'gc_pin' : LLOp(canrun=True), # XXX understand this, correct? (groggi)
- 'gc_unpin' : LLOp(canrun=True), # XXX understand this, correct? (groggi)
- 'gc__is_pinned' : LLOp(canrun=True), # XXX understand this, correct? (groggi)
+ 'gc_pin' : LLOp(canrun=True),
+ 'gc_unpin' : LLOp(canrun=True),
+ 'gc__is_pinned' : LLOp(canrun=True),
'gc_get_rpy_roots' : LLOp(),
'gc_get_rpy_referents': LLOp(),
From noreply at buildbot.pypy.org Thu Aug 14 10:35:36 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 10:35:36 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: comments edited
Message-ID: <20140814083536.DDB581C0323@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72791:2a4ed31fc381
Date: 2014-08-13 17:53 +0200
http://bitbucket.org/pypy/pypy/changeset/2a4ed31fc381/
Log: comments edited
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1225,13 +1225,11 @@
# but this flag is progressively removed in the sweeping phase.
# All objects should have this flag, except if they
- # don't have any GC pointer
+ # don't have any GC pointer or are pinned objects
typeid = self.get_type_id(obj)
- if not self._is_pinned(obj):
- # XXX do we need checks if the object is actually pinned? (groggi)
- if self.has_gcptr(typeid):
- ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0,
- "missing GCFLAG_TRACK_YOUNG_PTRS")
+ if self.has_gcptr(typeid) and not self._is_pinned(obj):
+ ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0,
+ "missing GCFLAG_TRACK_YOUNG_PTRS")
# the GCFLAG_FINALIZATION_ORDERING should not be set between coll.
ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0,
"unexpected GCFLAG_FINALIZATION_ORDERING")
@@ -1530,15 +1528,11 @@
#
# Keeps track of surviving pinned objects. See also '_trace_drag_out()'
# where this stack is filled. Pinning an object only prevents it from
- # being move, not from being collected if it is not used anymore.
+ # being moved, not from being collected if it is not reachable anymore.
self.surviving_pinned_objects = self.AddressStack()
#
# The following counter keeps track of the amount of alive and pinned
- # objects inside the nursery. The counter is reset, as we have to
- # check which pinned objects are actually still alive. Pinning an
- # object does not prevent the removal of an object, if it's not used
- # anymore.
- # XXX is this true? does it make sense? (groggi)
+ # objects inside the nursery.
self.pinned_objects_in_nursery = 0
#
# Before everything else, remove from 'old_objects_pointing_to_young'
From noreply at buildbot.pypy.org Thu Aug 14 10:35:38 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 10:35:38 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: added tests for prebuilt
objects and pinning
Message-ID: <20140814083538.15E6C1C0323@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72792:d28c2f266a13
Date: 2014-08-13 17:54 +0200
http://bitbucket.org/pypy/pypy/changeset/d28c2f266a13/
Log: added tests for prebuilt objects and pinning
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py
--- a/rpython/memory/gc/test/test_object_pinning.py
+++ b/rpython/memory/gc/test/test_object_pinning.py
@@ -40,6 +40,13 @@
self.gc.unpin(adr)
assert not self.gc._is_pinned(adr)
+ def test_prebuilt_not_pinnable(self):
+ ptr = lltype.malloc(S, immortal=True)
+ self.consider_constant(ptr)
+ assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr))
+ self.gc.collect()
+ assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr))
+
# XXX test with multiple mallocs, and only part of them is pinned
@@ -445,6 +452,41 @@
self.pin_referenced_from_young_in_stackroots(self.gc.collect)
+ def pin_referenced_from_prebuilt(self, collect_func):
+ # scenario: a prebuilt object points to a pinned object. Check if the
+ # pinned object doesn't move and is still accessible.
+ #
+ prebuilt_ptr = lltype.malloc(S, immortal=True)
+ prebuilt_ptr.someInt = 900
+ self.consider_constant(prebuilt_ptr)
+ prebuilt_adr = llmemory.cast_ptr_to_adr(prebuilt_ptr)
+ collect_func()
+ #
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ self.write(prebuilt_ptr, 'next', pinned_ptr)
+ pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
+ assert self.gc.pin(pinned_adr)
+ #
+ # check if everything is as expected
+ assert not self.gc.is_in_nursery(prebuilt_adr)
+ assert self.gc.is_in_nursery(pinned_adr)
+ assert pinned_ptr == prebuilt_ptr.next
+ assert pinned_ptr.someInt == 100
+ #
+ # do a collection and check again
+ collect_func()
+ assert self.gc.is_in_nursery(pinned_adr)
+ assert pinned_ptr == prebuilt_ptr.next
+ assert pinned_ptr.someInt == 100
+
+ def test_pin_referenced_from_prebuilt_minor_collection(self):
+ self.pin_referenced_from_prebuilt(self.gc.minor_collection)
+
+ def test_pin_referenced_from_prebuilt_major_collection(self):
+ self.pin_referenced_from_prebuilt(self.gc.collect)
+
+
def pin_shadow_1(self, collect_func):
ptr = self.malloc(S)
adr = llmemory.cast_ptr_to_adr(ptr)
From noreply at buildbot.pypy.org Thu Aug 14 10:35:39 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 10:35:39 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: assert for case that
should never happen
Message-ID: <20140814083539.3B34E1C0323@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72793:d6f675594c58
Date: 2014-08-13 20:04 +0200
http://bitbucket.org/pypy/pypy/changeset/d6f675594c58/
Log: assert for case that should never happen
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py
--- a/rpython/jit/backend/llsupport/gc.py
+++ b/rpython/jit/backend/llsupport/gc.py
@@ -135,8 +135,7 @@
#
if op.is_guard() or op.getopnum() == rop.FINISH:
llref = cast_instance_to_gcref(op.getdescr())
- if not rgc._make_sure_does_not_move(llref):
- raise NotImplementedError("blub") # XXX handle (groggi)
+ assert rgc._make_sure_does_not_move(llref)
gcrefs_output_list.append(llref)
newops.append(op)
return newops
From noreply at buildbot.pypy.org Thu Aug 14 11:47:14 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 11:47:14 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: size of array for
movable objects is now as large as it needs to be.
Message-ID: <20140814094714.85E001C06C9@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72794:5dcc35cb8954
Date: 2014-08-14 11:30 +0200
http://bitbucket.org/pypy/pypy/changeset/5dcc35cb8954/
Log: size of array for movable objects is now as large as it needs to be.
there is still some optimization possible: if only one object is
pinned but used in six operations, the array will contain six
pointers pointing to the same pinned object.
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py
--- a/rpython/jit/backend/llsupport/gc.py
+++ b/rpython/jit/backend/llsupport/gc.py
@@ -112,8 +112,8 @@
def gc_malloc_unicode(self, num_elem):
return self._bh_malloc_array(num_elem, self.unicode_descr)
- def _record_constptrs(self, op, gcrefs_output_list, pinned_obj_tracker):
- newops = []
+ def _record_constptrs(self, op, gcrefs_output_list, moving_output_list):
+ moving_output_list[op] = []
for i in range(op.numargs()):
v = op.getarg(i)
if isinstance(v, ConstPtr) and bool(v.value):
@@ -121,22 +121,34 @@
if rgc._make_sure_does_not_move(p):
gcrefs_output_list.append(p)
else:
- # encountered a pointer that points to a possibly moving object.
- # Solve the problem by double loading the address to the object
- # each run of the JITed code.
- result_ptr = BoxPtr()
- array_index = pinned_obj_tracker.add_ref(p)
- load_op = ResOperation(rop.GETARRAYITEM_GC,
- [ConstPtr(pinned_obj_tracker.ref_array_gcref), ConstInt(array_index)],
- result_ptr,
- descr=pinned_obj_tracker.ref_array_descr)
- newops.append(load_op)
- op.setarg(i, result_ptr)
+ moving_output_list[op].append(i)
#
if op.is_guard() or op.getopnum() == rop.FINISH:
llref = cast_instance_to_gcref(op.getdescr())
assert rgc._make_sure_does_not_move(llref)
gcrefs_output_list.append(llref)
+ #
+ if len(moving_output_list[op]) == 0:
+ del moving_output_list[op]
+
+ def _rewrite_constptrs(self, op, moving_output_list, pinned_obj_tracker):
+ newops = []
+ for arg_i in moving_output_list[op]:
+ v = op.getarg(arg_i)
+ # assert to make sure we got what we expected
+ assert isinstance(v, ConstPtr)
+ assert bool(v.value)
+ p = v.value
+ result_ptr = BoxPtr()
+ array_index = pinned_obj_tracker.add_ref(p)
+ load_op = ResOperation(rop.GETARRAYITEM_GC,
+ [ConstPtr(pinned_obj_tracker.ref_array_gcref),
+ ConstInt(array_index)],
+ result_ptr,
+ descr=pinned_obj_tracker.ref_array_descr)
+ newops.append(load_op)
+ op.setarg(arg_i, result_ptr)
+ #
newops.append(op)
return newops
@@ -147,18 +159,27 @@
# keep them alive if they end up as constants in the assembler
# XXX add comment (groggi)
- # XXX handle size in a not constant way? Get it from the GC? (groggi)
- pinned_obj_tracker = PinnedObjectTracker(cpu, 100)
- if not we_are_translated():
- self.last_pinned_object_tracker = pinned_obj_tracker
- gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref)
- rgc._make_sure_does_not_move(pinned_obj_tracker.ref_array_gcref)
-
+
newnewops = [] # XXX better name... (groggi)
+ moving_output_list = {}
for op in newops:
- ops = self._record_constptrs(op, gcrefs_output_list, pinned_obj_tracker)
- newnewops.extend(ops)
+ self._record_constptrs(op, gcrefs_output_list, moving_output_list)
+ #
+ if len(moving_output_list) > 0:
+ pinned_obj_tracker = PinnedObjectTracker(cpu, len(moving_output_list))
+ if not we_are_translated():
+ self.last_pinned_object_tracker = pinned_obj_tracker
+ gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref)
+ rgc._make_sure_does_not_move(pinned_obj_tracker.ref_array_gcref)
+
+ for op in newops:
+ if op in moving_output_list:
+ reops = self._rewrite_constptrs(op, moving_output_list,
+ pinned_obj_tracker)
+ newnewops.extend(reops)
+ else:
+ newnewops.append(op)
return newnewops
@specialize.memo()
From noreply at buildbot.pypy.org Thu Aug 14 12:42:46 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 12:42:46 +0200 (CEST)
Subject: [pypy-commit] pypy.org extradoc: update the values
Message-ID: <20140814104246.68DF61C06C9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: extradoc
Changeset: r527:393ae5365189
Date: 2014-08-14 12:42 +0200
http://bitbucket.org/pypy/pypy.org/changeset/393ae5365189/
Log: update the values
diff --git a/don1.html b/don1.html
--- a/don1.html
+++ b/don1.html
@@ -15,7 +15,7 @@
- $52304 of $105000 (49.8%)
+ $52313 of $105000 (49.8%)
From noreply at buildbot.pypy.org Thu Aug 14 13:02:16 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 13:02:16 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Fix.
Message-ID: <20140814110216.DA3C81D2320@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1312:43bf7ea2e593
Date: 2014-08-14 13:02 +0200
http://bitbucket.org/pypy/stmgc/changeset/43bf7ea2e593/
Log: Fix.
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c
--- a/c7/stm/gcpage.c
+++ b/c7/stm/gcpage.c
@@ -397,17 +397,20 @@
mark_visit_object(current->ss, segment_base);
}
mark_visit_object(tl->thread_local_obj, segment_base);
- stm_rewind_jmp_enum_shadowstack(tl, mark_visit_objects_from_ss);
tl = tl->next;
} while (tl != stm_all_thread_locals);
long i;
for (i = 1; i <= NB_SEGMENTS; i++) {
- if (get_priv_segment(i)->transaction_state != TS_NONE)
+ if (get_priv_segment(i)->transaction_state != TS_NONE) {
mark_visit_object(
get_priv_segment(i)->threadlocal_at_start_of_transaction,
get_segment_base(i));
+ stm_rewind_jmp_enum_shadowstack(
+ get_segment(i)->running_thread,
+ mark_visit_objects_from_ss);
+ }
}
}
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -194,12 +194,12 @@
/* frees all saved stack copies */
assert(_has_mutex());
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
- struct _rewind_jmp_moved_s *pnext;
while (p) {
- pnext = p->next;
+ struct _rewind_jmp_moved_s *pnext = p->next;
rj_free(p);
p = pnext;
}
rjthread->moved_off = NULL;
rjthread->moved_off_base = NULL;
+ rjthread->moved_off_ssbase = NULL;
}
From noreply at buildbot.pypy.org Thu Aug 14 15:02:55 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 15:02:55 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Move the mutex locking outside
rewind_setjmp.c, and remove some of
Message-ID: <20140814130255.056FD1C06C9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1313:1e16b1651dd4
Date: 2014-08-14 14:46 +0200
http://bitbucket.org/pypy/stmgc/changeset/1e16b1651dd4/
Log: Move the mutex locking outside rewind_setjmp.c, and remove some of
it, with some justification as comments.
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -328,8 +328,6 @@
{
assert(!_stm_in_transaction(tl));
- s_mutex_lock();
-
retry:
if (inevitable) {
wait_for_end_of_inevitable_transaction(tl);
@@ -390,6 +388,7 @@
long stm_start_transaction(stm_thread_local_t *tl)
{
+ s_mutex_lock();
#ifdef STM_NO_AUTOMATIC_SETJMP
long repeat_count = 0; /* test/support.py */
#else
@@ -401,6 +400,7 @@
void stm_start_inevitable_transaction(stm_thread_local_t *tl)
{
+ s_mutex_lock();
_stm_start_transaction(tl, true);
}
@@ -1077,6 +1077,7 @@
#ifdef STM_NO_AUTOMATIC_SETJMP
_test_run_abort(tl);
#else
+ s_mutex_lock();
stm_rewind_jmp_longjmp(tl);
#endif
}
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -4,12 +4,6 @@
#include
#include
-#ifndef _STM_CORE_H_
-long _has_mutex() {return 1;}
-void s_mutex_lock() {}
-void s_mutex_unlock() {}
-#endif
-
struct _rewind_jmp_moved_s {
struct _rewind_jmp_moved_s *next;
@@ -42,8 +36,6 @@
void *ssstop;
size_t stack_size, ssstack_size;
- assert(_has_mutex());
-
assert(rjthread->head != NULL);
stop = rjthread->head->frame_base;
ssstop = rjthread->head->shadowstack_base;
@@ -98,11 +90,14 @@
rjthread->repeat_count = result;
/* snapshot of top frame: needed every time because longjmp() frees
- the previous one. Need to have mutex locked otherwise a concurrent
- GC may get garbage while saving shadow stack */
- s_mutex_lock();
+ the previous one. Note that this function is called with the
+ mutex already acquired. Although it's not the job of this file,
+ we assert it is indeed acquired here. This is needed, otherwise a
+ concurrent GC may get garbage while saving shadow stack */
+#ifdef _STM_CORE_H_
+ assert(_has_mutex());
+#endif
copy_stack(rjthread, (char *)&saved, saved.ss1);
- s_mutex_unlock();
return result;
}
@@ -114,7 +109,6 @@
current stack, expanding it if necessary. The shadowstack should
already be restored at this point (restore_shadowstack()) */
assert(rjthread->moved_off_base != NULL);
- s_mutex_lock();
while (rjthread->moved_off) {
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
@@ -123,7 +117,6 @@
target -= p->stack_size;
if (target < stack_free) {
/* need more stack space! */
- s_mutex_unlock();
do_longjmp(rjthread, alloca(stack_free - target));
abort(); /* unreachable */
}
@@ -134,7 +127,12 @@
rj_free(p);
}
- s_mutex_unlock();
+#ifdef _STM_CORE_H_
+ /* This function must be called with the mutex held. It will
+ remain held across the longjmp that follows and into the
+ target rewind_jmp_setjmp() function. */
+ assert(_has_mutex());
+#endif
__builtin_longjmp(rjthread->jmpbuf, 1);
}
@@ -153,7 +151,9 @@
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
char *sstarget = rjthread->moved_off_ssbase;
+#ifdef _STM_CORE_H_
assert(_has_mutex());
+#endif
while (p) {
if (p->shadowstack_size) {
@@ -178,21 +178,26 @@
{
/* called when leaving a frame. copies the now-current frame
to the list of stack-slices */
- s_mutex_lock();
+#ifdef _STM_CORE_H_
+ /* A transaction should be running now. This means in particular
+ that it's not possible that a major GC runs concurrently with
+ this code (and tries to read the shadowstack slice). */
+ assert(_seems_to_be_running_transaction());
+#endif
if (rjthread->head == NULL) {
_rewind_jmp_free_stack_slices(rjthread);
- s_mutex_unlock();
return;
}
assert(rjthread->moved_off_base < (char *)rjthread->head);
copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase);
- s_mutex_unlock();
}
void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread)
{
/* frees all saved stack copies */
- assert(_has_mutex());
+#ifdef _STM_CORE_H_
+ assert(_seems_to_be_running_transaction()); /* see previous function */
+#endif
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
while (p) {
struct _rewind_jmp_moved_s *pnext = p->next;
From noreply at buildbot.pypy.org Thu Aug 14 15:02:56 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 15:02:56 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Fix (shown by demo_random2)
Message-ID: <20140814130256.4BD831C06C9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1314:94d281041161
Date: 2014-08-14 15:01 +0200
http://bitbucket.org/pypy/stmgc/changeset/94d281041161/
Log: Fix (shown by demo_random2)
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -1027,7 +1027,7 @@
}
#endif
-static void abort_with_mutex(void)
+static stm_thread_local_t *abort_with_mutex_no_longjmp(void)
{
assert(_has_mutex());
dprintf(("~~~ ABORT\n"));
@@ -1060,6 +1060,12 @@
/* Broadcast C_ABORTED to wake up contention.c */
cond_broadcast(C_ABORTED);
+ return tl;
+}
+
+static void abort_with_mutex(void)
+{
+ stm_thread_local_t *tl = abort_with_mutex_no_longjmp();
s_mutex_unlock();
/* It seems to be a good idea, at least in some examples, to sleep
diff --git a/c7/stm/core.h b/c7/stm/core.h
--- a/c7/stm/core.h
+++ b/c7/stm/core.h
@@ -272,6 +272,7 @@
static void teardown_core(void);
static void abort_with_mutex(void) __attribute__((noreturn));
+static stm_thread_local_t *abort_with_mutex_no_longjmp(void);
static void abort_data_structures_from_segment_num(int segment_num);
static inline bool was_read_remote(char *base, object_t *obj,
diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c
--- a/c7/stm/forksupport.c
+++ b/c7/stm/forksupport.c
@@ -181,20 +181,17 @@
assert(tl->associated_segment_num == i);
assert(pr->transaction_state == TS_REGULAR);
set_gs_register(get_segment_base(i));
+ assert(STM_SEGMENT->segment_num == i);
- rewind_jmp_buf rjbuf;
- stm_rewind_jmp_enterframe(tl, &rjbuf);
- if (stm_rewind_jmp_setjmp(tl) == 0) {
+ s_mutex_lock();
#ifndef NDEBUG
- pr->running_pthread = pthread_self();
+ pr->running_pthread = pthread_self();
#endif
- pr->pub.running_thread->shadowstack = (
- pr->shadowstack_at_start_of_transaction);
- strcpy(pr->marker_self, "fork");
- stm_abort_transaction();
- }
- stm_rewind_jmp_forget(tl);
- stm_rewind_jmp_leaveframe(tl, &rjbuf);
+ pr->pub.running_thread->shadowstack = (
+ pr->shadowstack_at_start_of_transaction);
+ strcpy(pr->marker_self, "fork");
+ abort_with_mutex_no_longjmp();
+ s_mutex_unlock();
}
static void forksupport_child(void)
From noreply at buildbot.pypy.org Thu Aug 14 15:14:14 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 15:14:14 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: import stmgc/1815f493a1c5
Message-ID: <20140814131414.50A811C06C9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72795:b56e8c73f107
Date: 2014-08-12 17:37 +0200
http://bitbucket.org/pypy/pypy/changeset/b56e8c73f107/
Log: import stmgc/1815f493a1c5
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-bdc151305c79
+1815f493a1c5
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -394,7 +394,7 @@
#ifdef STM_NO_AUTOMATIC_SETJMP
long repeat_count = 0; /* test/support.py */
#else
- long repeat_count = rewind_jmp_setjmp(&tl->rjthread);
+ long repeat_count = stm_rewind_jmp_setjmp(tl);
#endif
_stm_start_transaction(tl, false);
return repeat_count;
@@ -829,7 +829,7 @@
dprintf(("commit_transaction\n"));
assert(STM_SEGMENT->nursery_end == NURSERY_END);
- rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread);
+ stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
/* if a major collection is required, do it here */
if (is_major_collection_requested()) {
@@ -984,12 +984,23 @@
reset_modified_from_other_segments(segment_num);
_verify_cards_cleared_in_all_lists(pseg);
- /* reset the tl->shadowstack and thread_local_obj to their original
- value before the transaction start */
+ /* reset tl->shadowstack and thread_local_obj to their original
+ value before the transaction start. Also restore the content
+ of the shadowstack here. */
stm_thread_local_t *tl = pseg->pub.running_thread;
+#ifdef STM_NO_AUTOMATIC_SETJMP
+ /* In tests, we don't save and restore the shadowstack correctly.
+ Be sure to not change items below shadowstack_at_start_of_transaction.
+ There is no such restrictions in non-Python-based tests. */
assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction);
- pseg->shadowstack_at_abort = tl->shadowstack;
tl->shadowstack = pseg->shadowstack_at_start_of_transaction;
+#else
+ /* NB. careful, this function might be called more than once to
+ abort a given segment. Make sure that
+ stm_rewind_jmp_restore_shadowstack() is idempotent. */
+ stm_rewind_jmp_restore_shadowstack(tl);
+ assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction);
+#endif
tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction;
tl->last_abort__bytes_in_nursery = bytes_in_nursery;
@@ -1064,7 +1075,7 @@
#ifdef STM_NO_AUTOMATIC_SETJMP
_test_run_abort(tl);
#else
- rewind_jmp_longjmp(&tl->rjthread);
+ stm_rewind_jmp_longjmp(tl);
#endif
}
@@ -1079,7 +1090,7 @@
marker_fetch_inev();
wait_for_end_of_inevitable_transaction(NULL);
STM_PSEGMENT->transaction_state = TS_INEVITABLE;
- rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread);
+ stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
clear_callbacks_on_abort();
}
else {
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -187,7 +187,6 @@
'thread_local_obj' field. */
struct stm_shadowentry_s *shadowstack_at_start_of_transaction;
object_t *threadlocal_at_start_of_transaction;
- struct stm_shadowentry_s *shadowstack_at_abort;
/* Already signalled to commit soon: */
bool signalled_to_commit_soon;
diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c
--- a/rpython/translator/stm/src_stm/stm/forksupport.c
+++ b/rpython/translator/stm/src_stm/stm/forksupport.c
@@ -185,7 +185,7 @@
rewind_jmp_buf rjbuf;
stm_rewind_jmp_enterframe(tl, &rjbuf);
- if (rewind_jmp_setjmp(&tl->rjthread) == 0) {
+ if (stm_rewind_jmp_setjmp(tl) == 0) {
#ifndef NDEBUG
pr->running_pthread = pthread_self();
#endif
@@ -194,7 +194,7 @@
strcpy(pr->marker_self, "fork");
stm_abort_transaction();
}
- rewind_jmp_forget(&tl->rjthread);
+ stm_rewind_jmp_forget(tl);
stm_rewind_jmp_leaveframe(tl, &rjbuf);
}
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -332,9 +332,20 @@
function with the interpreter's dispatch loop, you need to declare
a local variable of type 'rewind_jmp_buf' and call these macros. */
#define stm_rewind_jmp_enterframe(tl, rjbuf) \
- rewind_jmp_enterframe(&(tl)->rjthread, rjbuf)
+ rewind_jmp_enterframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack)
#define stm_rewind_jmp_leaveframe(tl, rjbuf) \
- rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf)
+ rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack)
+#define stm_rewind_jmp_setjmp(tl) \
+ rewind_jmp_setjmp(&(tl)->rjthread, (tl)->shadowstack)
+#define stm_rewind_jmp_longjmp(tl) \
+ rewind_jmp_longjmp(&(tl)->rjthread)
+#define stm_rewind_jmp_forget(tl) \
+ rewind_jmp_forget(&(tl)->rjthread)
+#define stm_rewind_jmp_restore_shadowstack(tl) do { \
+ assert(rewind_jmp_armed(&(tl)->rjthread)); \
+ (tl)->shadowstack = (struct stm_shadowentry_s *) \
+ rewind_jmp_restore_shadowstack(&(tl)->rjthread); \
+} while (0)
/* Starting and ending transactions. stm_read(), stm_write() and
stm_allocate() should only be called from within a transaction.
From noreply at buildbot.pypy.org Thu Aug 14 15:14:15 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 15:14:15 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Comparing this with the
trunk version, I think that this is what is meant
Message-ID: <20140814131415.7A94F1C06C9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72796:209a49d5e176
Date: 2014-08-12 17:37 +0200
http://bitbucket.org/pypy/pypy/changeset/209a49d5e176/
Log: Comparing this with the trunk version, I think that this is what is
meant
diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py
--- a/rpython/jit/backend/tool/viewcode.py
+++ b/rpython/jit/backend/tool/viewcode.py
@@ -116,7 +116,7 @@
p = subprocess.Popen(symbollister % filename, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
- if not p.returncode:
+ if p.returncode:
raise Exception('Encountered an error running nm: %s' %
stderr)
for line in stdout.splitlines(True):
From noreply at buildbot.pypy.org Thu Aug 14 15:14:16 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 15:14:16 +0200 (CEST)
Subject: [pypy-commit] pypy default: issue #1832: provide a better error
message for some cases
Message-ID: <20140814131416.BB6C71C06C9@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72797:ed0f03db54a8
Date: 2014-08-14 15:13 +0200
http://bitbucket.org/pypy/pypy/changeset/ed0f03db54a8/
Log: issue #1832: provide a better error message for some cases
diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py
--- a/rpython/rtyper/normalizecalls.py
+++ b/rpython/rtyper/normalizecalls.py
@@ -62,6 +62,8 @@
msg.append("the following functions:")
msg.append(" %s" % ("\n ".join(pfg), ))
msg.append("are called with inconsistent numbers of arguments")
+ msg.append("(and/or the argument names are different, which is"
+ " not supported in this case)")
if shape1[0] != shape2[0]:
msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0]))
else:
diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py
--- a/rpython/rtyper/test/test_normalizecalls.py
+++ b/rpython/rtyper/test/test_normalizecalls.py
@@ -185,6 +185,7 @@
.+Sub1.fn
.+Sub2.fn
are called with inconsistent numbers of arguments
+\(and/or the argument names are different, which is not supported in this case\)
sometimes with \d arguments, sometimes with \d
the callers of these functions are:
.+otherfunc
From noreply at buildbot.pypy.org Thu Aug 14 15:40:12 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 15:40:12 +0200 (CEST)
Subject: [pypy-commit] stmgc default: More attempt to fix demo_random2
Message-ID: <20140814134012.ED5F31C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1315:29376f500349
Date: 2014-08-14 15:38 +0200
http://bitbucket.org/pypy/stmgc/changeset/29376f500349/
Log: More attempt to fix demo_random2
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -1000,7 +1000,8 @@
/* we need to do this here and not directly in rewind_longjmp() because
that is called when we already released everything (safe point)
and a concurrent major GC could mess things up. */
- stm_rewind_jmp_restore_shadowstack(tl);
+ if (tl->shadowstack != NULL)
+ stm_rewind_jmp_restore_shadowstack(tl);
assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction);
#endif
tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction;
diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c
--- a/c7/stm/forksupport.c
+++ b/c7/stm/forksupport.c
@@ -187,9 +187,10 @@
#ifndef NDEBUG
pr->running_pthread = pthread_self();
#endif
- pr->pub.running_thread->shadowstack = (
- pr->shadowstack_at_start_of_transaction);
strcpy(pr->marker_self, "fork");
+ tl->shadowstack = NULL;
+ pr->shadowstack_at_start_of_transaction = NULL;
+ stm_rewind_jmp_forget(tl);
abort_with_mutex_no_longjmp();
s_mutex_unlock();
}
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -195,9 +195,6 @@
void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread)
{
/* frees all saved stack copies */
-#ifdef _STM_CORE_H_
- assert(_seems_to_be_running_transaction()); /* see previous function */
-#endif
struct _rewind_jmp_moved_s *p = rjthread->moved_off;
while (p) {
struct _rewind_jmp_moved_s *pnext = p->next;
From noreply at buildbot.pypy.org Thu Aug 14 16:19:17 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 16:19:17 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix the double definition of
_GNU_SOURCE
Message-ID: <20140814141917.B3D8C1C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72798:654eb5a6b76f
Date: 2014-08-14 16:03 +0200
http://bitbucket.org/pypy/pypy/changeset/654eb5a6b76f/
Log: Fix the double definition of _GNU_SOURCE
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -919,14 +919,6 @@
filename = targetdir.join(modulename + '.c')
f = filename.open('w')
- if database.with_stm:
- print >> f, '/* XXX temporary, for SYS_arch_prctl below */'
- print >> f, '#define _GNU_SOURCE'
- print >> f, '#include '
- print >> f, '#include '
- print >> f, '#include '
- print >> f, '#include '
- print >> f
incfilename = targetdir.join('common_header.h')
fi = incfilename.open('w')
fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n')
@@ -935,6 +927,12 @@
# Header
#
print >> f, '#include "common_header.h"'
+ if database.with_stm:
+ print >> f, '/* XXX temporary, for SYS_arch_prctl below */'
+ print >> f, '#include '
+ print >> f, '#include '
+ print >> f, '#include '
+ print >> f, '#include '
print >> f
commondefs(defines)
for key, value in defines.items():
From noreply at buildbot.pypy.org Thu Aug 14 16:19:19 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 16:19:19 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Hack hack hack: going for
the minimal amount of changes first
Message-ID: <20140814141919.1525B1C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72799:2cc154f95ab3
Date: 2014-08-14 16:17 +0200
http://bitbucket.org/pypy/pypy/changeset/2cc154f95ab3/
Log: Hack hack hack: going for the minimal amount of changes first
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c
--- a/rpython/translator/stm/src_stm/stmgcintf.c
+++ b/rpython/translator/stm/src_stm/stmgcintf.c
@@ -155,34 +155,35 @@
pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit;
}
-void pypy_stm_start_transaction(stm_jmpbuf_t *jmpbuf_ptr,
- volatile long *v_counter)
+static long _pypy_stm_start_transaction(void)
{
pypy_stm_nursery_low_fill_mark = 1; /* will be set to a correct value below */
- _stm_start_transaction(&stm_thread_local, jmpbuf_ptr);
+ long counter = stm_start_transaction(&stm_thread_local);
- _pypy_stm_initialize_nursery_low_fill_mark(*v_counter);
- *v_counter = *v_counter + 1;
+ _pypy_stm_initialize_nursery_low_fill_mark(counter);
pypy_stm_ready_atomic = 1; /* reset after abort */
+
+ return counter;
}
void pypy_stm_perform_transaction(object_t *arg, int callback(object_t *, int))
{ /* must save roots around this call */
- stm_jmpbuf_t jmpbuf;
- long volatile v_counter = 0;
- int (*volatile v_callback)(object_t *, int) = callback;
+ //
+ // XXX this function should be killed! We no longer need a
+ // callback-based approach at all.
+
#ifndef NDEBUG
struct stm_shadowentry_s *volatile v_old_shadowstack =
stm_thread_local.shadowstack;
#endif
-
+ rewind_jmp_buf rjbuf;
+ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
//STM_PUSH_ROOT(stm_thread_local, STM_STACK_MARKER_NEW);
STM_PUSH_ROOT(stm_thread_local, arg);
while (1) {
long counter;
-
if (pypy_stm_should_break_transaction()) { //pypy_stm_ready_atomic == 1) {
/* Not in an atomic transaction; but it might be an inevitable
transaction.
@@ -191,18 +192,12 @@
stm_commit_transaction();
- /* After setjmp(), the local variables v_* are preserved because
- they are volatile. The other local variables should be
- declared below than this point only.
- */
- while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ }
- counter = v_counter;
- pypy_stm_start_transaction(&jmpbuf, &v_counter);
+ counter = _pypy_stm_start_transaction();
}
else {
/* In an atomic transaction */
//assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1);
- counter = v_counter;
+ counter = 0;
}
/* invoke the callback in the new transaction */
@@ -210,32 +205,17 @@
assert(v_old_shadowstack == stm_thread_local.shadowstack);// - 1);
STM_PUSH_ROOT(stm_thread_local, arg);
- long result = v_callback(arg, counter);
+ long result = callback(arg, counter);
if (result <= 0)
break;
- v_counter = 0;
- }
-
- if (STM_SEGMENT->jmpbuf_ptr == &jmpbuf) {
- /* we can't leave this function leaving a non-inevitable
- transaction whose jmpbuf points into this function.
- we could break the transaction here but we instead rely
- on the caller to break it. Since we have to use an inevitable
- transaction anyway, using the current one may be cheaper.
- */
- _stm_become_inevitable("perform_transaction left with inevitable");
- }
- /* double-check */
- if (pypy_stm_ready_atomic == 1) {
- }
- else {
- assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1);
}
STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */
//uintptr_t x = (uintptr_t)STM_POP_ROOT_RET(stm_thread_local);
//assert(x == STM_STACK_MARKER_NEW || x == STM_STACK_MARKER_OLD);
assert(v_old_shadowstack == stm_thread_local.shadowstack);
+
+ stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
}
void _pypy_stm_inev_state(void)
diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h
--- a/rpython/translator/stm/src_stm/stmgcintf.h
+++ b/rpython/translator/stm/src_stm/stmgcintf.h
@@ -39,7 +39,7 @@
static inline void pypy_stm_become_inevitable(const char *msg)
{
assert(STM_SEGMENT->running_thread == &stm_thread_local);
- if (STM_SEGMENT->jmpbuf_ptr != NULL) {
+ if (!stm_is_inevitable()) {
_pypy_stm_become_inevitable(msg);
}
}
@@ -92,8 +92,7 @@
long pypy_stm_enter_callback_call(void);
void pypy_stm_leave_callback_call(long);
void pypy_stm_set_transaction_length(double);
-void pypy_stm_perform_transaction(object_t *, int(object_t *, int));
-void pypy_stm_start_transaction(stm_jmpbuf_t *, volatile long *);
+void pypy_stm_perform_transaction(object_t *, int(object_t *, int));//XXX
static inline int pypy_stm_should_break_transaction(void)
{
From noreply at buildbot.pypy.org Thu Aug 14 16:19:20 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 16:19:20 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: import stmgc/29376f500349
Message-ID: <20140814141920.4912C1C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72800:057a885fd864
Date: 2014-08-14 16:17 +0200
http://bitbucket.org/pypy/pypy/changeset/057a885fd864/
Log: import stmgc/29376f500349
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-1815f493a1c5
+29376f500349
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -329,8 +329,6 @@
{
assert(!_stm_in_transaction(tl));
- s_mutex_lock();
-
retry:
if (inevitable) {
wait_for_end_of_inevitable_transaction(tl);
@@ -391,6 +389,7 @@
long stm_start_transaction(stm_thread_local_t *tl)
{
+ s_mutex_lock();
#ifdef STM_NO_AUTOMATIC_SETJMP
long repeat_count = 0; /* test/support.py */
#else
@@ -402,6 +401,7 @@
void stm_start_inevitable_transaction(stm_thread_local_t *tl)
{
+ s_mutex_lock();
_stm_start_transaction(tl, true);
}
@@ -998,7 +998,11 @@
/* NB. careful, this function might be called more than once to
abort a given segment. Make sure that
stm_rewind_jmp_restore_shadowstack() is idempotent. */
- stm_rewind_jmp_restore_shadowstack(tl);
+ /* we need to do this here and not directly in rewind_longjmp() because
+ that is called when we already released everything (safe point)
+ and a concurrent major GC could mess things up. */
+ if (tl->shadowstack != NULL)
+ stm_rewind_jmp_restore_shadowstack(tl);
assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction);
#endif
tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction;
@@ -1025,7 +1029,7 @@
}
#endif
-static void abort_with_mutex(void)
+static stm_thread_local_t *abort_with_mutex_no_longjmp(void)
{
assert(_has_mutex());
dprintf(("~~~ ABORT\n"));
@@ -1058,6 +1062,12 @@
/* Broadcast C_ABORTED to wake up contention.c */
cond_broadcast(C_ABORTED);
+ return tl;
+}
+
+static void abort_with_mutex(void)
+{
+ stm_thread_local_t *tl = abort_with_mutex_no_longjmp();
s_mutex_unlock();
/* It seems to be a good idea, at least in some examples, to sleep
@@ -1075,6 +1085,7 @@
#ifdef STM_NO_AUTOMATIC_SETJMP
_test_run_abort(tl);
#else
+ s_mutex_lock();
stm_rewind_jmp_longjmp(tl);
#endif
}
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -273,6 +273,7 @@
static void teardown_core(void);
static void abort_with_mutex(void) __attribute__((noreturn));
+static stm_thread_local_t *abort_with_mutex_no_longjmp(void);
static void abort_data_structures_from_segment_num(int segment_num);
static inline bool was_read_remote(char *base, object_t *obj,
diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c
--- a/rpython/translator/stm/src_stm/stm/forksupport.c
+++ b/rpython/translator/stm/src_stm/stm/forksupport.c
@@ -182,20 +182,18 @@
assert(tl->associated_segment_num == i);
assert(pr->transaction_state == TS_REGULAR);
set_gs_register(get_segment_base(i));
+ assert(STM_SEGMENT->segment_num == i);
- rewind_jmp_buf rjbuf;
- stm_rewind_jmp_enterframe(tl, &rjbuf);
- if (stm_rewind_jmp_setjmp(tl) == 0) {
+ s_mutex_lock();
#ifndef NDEBUG
- pr->running_pthread = pthread_self();
+ pr->running_pthread = pthread_self();
#endif
- pr->pub.running_thread->shadowstack = (
- pr->shadowstack_at_start_of_transaction);
- strcpy(pr->marker_self, "fork");
- stm_abort_transaction();
- }
+ strcpy(pr->marker_self, "fork");
+ tl->shadowstack = NULL;
+ pr->shadowstack_at_start_of_transaction = NULL;
stm_rewind_jmp_forget(tl);
- stm_rewind_jmp_leaveframe(tl, &rjbuf);
+ abort_with_mutex_no_longjmp();
+ s_mutex_unlock();
}
static void forksupport_child(void)
diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c
--- a/rpython/translator/stm/src_stm/stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/stm/gcpage.c
@@ -364,6 +364,17 @@
mark_trace(obj, segment_base);
}
+static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size)
+{
+ const struct stm_shadowentry_s *p, *end;
+ p = (const struct stm_shadowentry_s *)slice;
+ end = (const struct stm_shadowentry_s *)(slice + size);
+ for (; p < end; p++)
+ if ((((uintptr_t)p->ss) & 3) == 0)
+ mark_visit_object(p->ss, stm_object_pages);
+ return NULL;
+}
+
static void mark_visit_from_roots(void)
{
if (testing_prebuilt_objs != NULL) {
@@ -393,10 +404,14 @@
long i;
for (i = 1; i <= NB_SEGMENTS; i++) {
- if (get_priv_segment(i)->transaction_state != TS_NONE)
+ if (get_priv_segment(i)->transaction_state != TS_NONE) {
mark_visit_object(
get_priv_segment(i)->threadlocal_at_start_of_transaction,
get_segment_base(i));
+ stm_rewind_jmp_enum_shadowstack(
+ get_segment(i)->running_thread,
+ mark_visit_objects_from_ss);
+ }
}
}
diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c
--- a/rpython/translator/stm/src_stm/stm/marker.c
+++ b/rpython/translator/stm/src_stm/stm/marker.c
@@ -19,10 +19,9 @@
struct stm_shadowentry_s *current = tl->shadowstack - 1;
struct stm_shadowentry_s *base = tl->shadowstack_base;
- /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is
- a convenient stopper for the loop below but which shouldn't
- be returned. */
- assert(base->ss == (object_t *)STM_STACK_MARKER_OLD);
+ /* The shadowstack_base contains -1, which is a convenient stopper for
+ the loop below but which shouldn't be returned. */
+ assert(base->ss == (object_t *)-1);
while (!(((uintptr_t)current->ss) & 1)) {
current--;
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c
--- a/rpython/translator/stm/src_stm/stm/nursery.c
+++ b/rpython/translator/stm/src_stm/stm/nursery.c
@@ -157,27 +157,22 @@
{
stm_thread_local_t *tl = STM_SEGMENT->running_thread;
struct stm_shadowentry_s *current = tl->shadowstack;
- struct stm_shadowentry_s *base = tl->shadowstack_base;
- while (1) {
+ struct stm_shadowentry_s *finalbase = tl->shadowstack_base;
+ struct stm_shadowentry_s *ssbase;
+ ssbase = (struct stm_shadowentry_s *)tl->rjthread.moved_off_ssbase;
+ if (ssbase == NULL)
+ ssbase = finalbase;
+ else
+ assert(finalbase <= ssbase && ssbase <= current);
+
+ while (current > ssbase) {
--current;
- OPT_ASSERT(current >= base);
-
uintptr_t x = (uintptr_t)current->ss;
if ((x & 3) == 0) {
/* the stack entry is a regular pointer (possibly NULL) */
minor_trace_if_young(¤t->ss);
}
- else if (x == STM_STACK_MARKER_NEW) {
- /* the marker was not already seen: mark it as seen,
- but continue looking more deeply in the shadowstack */
- current->ss = (object_t *)STM_STACK_MARKER_OLD;
- }
- else if (x == STM_STACK_MARKER_OLD) {
- /* the marker was already seen: we can stop the
- root stack tracing at this point */
- break;
- }
else {
/* it is an odd-valued marker, ignore */
}
diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c
--- a/rpython/translator/stm/src_stm/stm/setup.c
+++ b/rpython/translator/stm/src_stm/stm/setup.c
@@ -202,13 +202,13 @@
struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start;
tl->shadowstack = s;
tl->shadowstack_base = s;
- STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD);
+ STM_PUSH_ROOT(*tl, -1);
}
static void _done_shadow_stack(stm_thread_local_t *tl)
{
assert(tl->shadowstack > tl->shadowstack_base);
- assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD);
+ assert(tl->shadowstack_base->ss == (object_t *)-1);
char *start = (char *)tl->shadowstack_base;
_shadowstack_trap_page(start, PROT_READ | PROT_WRITE);
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -314,8 +314,6 @@
#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p))
#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss))
#define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss)
-#define STM_STACK_MARKER_NEW (-41)
-#define STM_STACK_MARKER_OLD (-43)
/* Every thread needs to have a corresponding stm_thread_local_t
@@ -346,6 +344,8 @@
(tl)->shadowstack = (struct stm_shadowentry_s *) \
rewind_jmp_restore_shadowstack(&(tl)->rjthread); \
} while (0)
+#define stm_rewind_jmp_enum_shadowstack(tl, callback) \
+ rewind_jmp_enum_shadowstack(&(tl)->rjthread, callback)
/* Starting and ending transactions. stm_read(), stm_write() and
stm_allocate() should only be called from within a transaction.
From noreply at buildbot.pypy.org Thu Aug 14 16:19:21 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 16:19:21 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix.
Message-ID: <20140814141921.7381A1C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72801:8f655372a008
Date: 2014-08-14 16:18 +0200
http://bitbucket.org/pypy/pypy/changeset/8f655372a008/
Log: Fix.
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c
--- a/rpython/translator/stm/src_stm/stmgcintf.c
+++ b/rpython/translator/stm/src_stm/stmgcintf.c
@@ -248,7 +248,7 @@
void pypy_stm_become_globally_unique_transaction(void)
{
- if (STM_SEGMENT->jmpbuf_ptr != NULL) {
+ if (!stm_is_inevitable()) {
_pypy_stm_inev_state();
}
stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT");
From noreply at buildbot.pypy.org Thu Aug 14 16:22:16 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 16:22:16 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Forgot to add these files
Message-ID: <20140814142216.BCFFE1C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72802:52e94842f3f8
Date: 2014-08-14 16:21 +0200
http://bitbucket.org/pypy/pypy/changeset/52e94842f3f8/
Log: Forgot to add these files
diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.c b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c
new file mode 100644
--- /dev/null
+++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c
@@ -0,0 +1,208 @@
+/* Imported by rpython/translator/stm/import_stmgc.py */
+#include "rewind_setjmp.h"
+#include
+#include
+#include
+#include
+
+
+struct _rewind_jmp_moved_s {
+ struct _rewind_jmp_moved_s *next;
+ size_t stack_size;
+ size_t shadowstack_size;
+};
+#define RJM_HEADER sizeof(struct _rewind_jmp_moved_s)
+
+#ifndef RJBUF_CUSTOM_MALLOC
+#define rj_malloc malloc
+#define rj_free free
+#else
+void *rj_malloc(size_t);
+void rj_free(void *);
+#endif
+
+
+static void copy_stack(rewind_jmp_thread *rjthread, char *base, void *ssbase)
+{
+ /* Copy away part of the stack and shadowstack. Sets moved_off_base to
+ the current frame_base.
+
+ The stack is copied between 'base' (lower limit, i.e. newest bytes)
+ and 'rjthread->head->frame_base' (upper limit, i.e. oldest bytes).
+ The shadowstack is copied between 'ssbase' (upper limit, newest)
+ and 'rjthread->head->shadowstack_base' (lower limit, oldest).
+ */
+ struct _rewind_jmp_moved_s *next;
+ char *stop;
+ void *ssstop;
+ size_t stack_size, ssstack_size;
+
+ assert(rjthread->head != NULL);
+ stop = rjthread->head->frame_base;
+ ssstop = rjthread->head->shadowstack_base;
+ assert(stop >= base);
+ assert(ssstop <= ssbase);
+ stack_size = stop - base;
+ ssstack_size = ssbase - ssstop;
+
+ next = (struct _rewind_jmp_moved_s *)
+ rj_malloc(RJM_HEADER + stack_size + ssstack_size);
+ assert(next != NULL); /* XXX out of memory */
+ next->next = rjthread->moved_off;
+ next->stack_size = stack_size;
+ next->shadowstack_size = ssstack_size;
+
+ memcpy(((char *)next) + RJM_HEADER, base, stack_size);
+ memcpy(((char *)next) + RJM_HEADER + stack_size, ssstop,
+ ssstack_size);
+
+ rjthread->moved_off_base = stop;
+ rjthread->moved_off_ssbase = ssstop;
+ rjthread->moved_off = next;
+}
+
+__attribute__((noinline))
+long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss)
+{
+ /* saves the current stack frame to the list of slices and
+ calls setjmp(). It returns the number of times a longjmp()
+ jumped back to this setjmp() */
+ if (rjthread->moved_off) {
+ /* old stack slices are not needed anymore (next longjmp()
+ will restore only to this setjmp()) */
+ _rewind_jmp_free_stack_slices(rjthread);
+ }
+ /* all locals of this function that need to be saved and restored
+ across the setjmp() should be stored inside this structure */
+ struct { void *ss1; rewind_jmp_thread *rjthread1; } volatile saved =
+ { ss, rjthread };
+
+ int result;
+ if (__builtin_setjmp(rjthread->jmpbuf) == 0) {
+ rjthread = saved.rjthread1;
+ rjthread->initial_head = rjthread->head;
+ result = 0;
+ }
+ else {
+ rjthread = saved.rjthread1;
+ rjthread->head = rjthread->initial_head;
+ result = rjthread->repeat_count + 1;
+ }
+ rjthread->repeat_count = result;
+
+ /* snapshot of top frame: needed every time because longjmp() frees
+ the previous one. Note that this function is called with the
+ mutex already acquired. Although it's not the job of this file,
+ we assert it is indeed acquired here. This is needed, otherwise a
+ concurrent GC may get garbage while saving shadow stack */
+#ifdef _STM_CORE_H_
+ assert(_has_mutex());
+#endif
+ copy_stack(rjthread, (char *)&saved, saved.ss1);
+
+ return result;
+}
+
+__attribute__((noinline, noreturn))
+static void do_longjmp(rewind_jmp_thread *rjthread, char *stack_free)
+{
+ /* go through list of copied stack-slices and copy them back to the
+ current stack, expanding it if necessary. The shadowstack should
+ already be restored at this point (restore_shadowstack()) */
+ assert(rjthread->moved_off_base != NULL);
+
+ while (rjthread->moved_off) {
+ struct _rewind_jmp_moved_s *p = rjthread->moved_off;
+ char *target = rjthread->moved_off_base;
+ /* CPU stack grows downwards: */
+ target -= p->stack_size;
+ if (target < stack_free) {
+ /* need more stack space! */
+ do_longjmp(rjthread, alloca(stack_free - target));
+ abort(); /* unreachable */
+ }
+ memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size);
+
+ rjthread->moved_off_base = target;
+ rjthread->moved_off = p->next;
+ rj_free(p);
+ }
+
+#ifdef _STM_CORE_H_
+ /* This function must be called with the mutex held. It will
+ remain held across the longjmp that follows and into the
+ target rewind_jmp_setjmp() function. */
+ assert(_has_mutex());
+#endif
+ __builtin_longjmp(rjthread->jmpbuf, 1);
+}
+
+__attribute__((noreturn))
+void rewind_jmp_longjmp(rewind_jmp_thread *rjthread)
+{
+ char _rewind_jmp_marker;
+ do_longjmp(rjthread, &_rewind_jmp_marker);
+}
+
+
+char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread,
+ void *callback(void *, const void *, size_t))
+{
+ /* enumerate all saved shadow-stack slices */
+ struct _rewind_jmp_moved_s *p = rjthread->moved_off;
+ char *sstarget = rjthread->moved_off_ssbase;
+
+#ifdef _STM_CORE_H_
+ assert(_has_mutex());
+#endif
+
+ while (p) {
+ if (p->shadowstack_size) {
+ void *ss_slice = ((char *)p) + RJM_HEADER + p->stack_size;
+ callback(sstarget, ss_slice, p->shadowstack_size);
+
+ sstarget += p->shadowstack_size;
+ }
+ p = p->next;
+ }
+ return sstarget;
+}
+
+
+char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread)
+{
+ return rewind_jmp_enum_shadowstack(rjthread, memcpy);
+}
+
+__attribute__((noinline))
+void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread)
+{
+ /* called when leaving a frame. copies the now-current frame
+ to the list of stack-slices */
+#ifdef _STM_CORE_H_
+ /* A transaction should be running now. This means in particular
+ that it's not possible that a major GC runs concurrently with
+ this code (and tries to read the shadowstack slice). */
+ assert(_seems_to_be_running_transaction());
+#endif
+ if (rjthread->head == NULL) {
+ _rewind_jmp_free_stack_slices(rjthread);
+ return;
+ }
+ assert(rjthread->moved_off_base < (char *)rjthread->head);
+ copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase);
+}
+
+void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread)
+{
+ /* frees all saved stack copies */
+ struct _rewind_jmp_moved_s *p = rjthread->moved_off;
+ while (p) {
+ struct _rewind_jmp_moved_s *pnext = p->next;
+ rj_free(p);
+ p = pnext;
+ }
+ rjthread->moved_off = NULL;
+ rjthread->moved_off_base = NULL;
+ rjthread->moved_off_ssbase = NULL;
+}
diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h
new file mode 100644
--- /dev/null
+++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h
@@ -0,0 +1,109 @@
+/* Imported by rpython/translator/stm/import_stmgc.py */
+#ifndef _REWIND_SETJMP_H_
+#define _REWIND_SETJMP_H_
+
+
+#include
+
+/************************************************************
+There is a singly-linked list of frames in each thread
+rjthread->head->prev->prev->prev
+
+Another singly-linked list is the list of copied stack-slices.
+When doing a setjmp(), we copy the top-frame, free all old
+stack-slices, and link it to the top-frame->moved_off.
+When returning from the top-frame while moved_off still points
+to a slice, we also need to copy the top-frame->prev frame/slice
+and add it to this list (pointed to by moved_off).
+--------------------------------------------------------------
+
+ : : ^^^^^
+ |-------------------| older frames in the stack
+ | prev=0 |
+ ,---> | rewind_jmp_buf |
+ | |-------------------|
+ | | |
+ | : :
+ | : :
+ | | |
+ | |-------------------|
+ `---------prev |
+ ,----> | rewind_jmp_buf |
+ | +-------------------|
+ | | |
+ | : :
+ | | |
+ | |-------------------|
+ `----------prev |
+ ,---> | rewind_jmp_buf | <--------------- MOVED_OFF_BASE
+ | |---------------- +-------------+
+ | | | STACK COPY |
+ | | : :
+ | : | size |
+ | | | next | <---- MOVED_OFF
+ | | +---|------ +-------------+
+ | | | | | STACK COPY |
+ | |-------------------| | : (SEQUEL) :
+ `---------prev | | : :
+HEAD-----> | rewind_jmp_buf | | | |
+ |-------------------| | | size |
+ `------> | next=0 |
+ +-------------+
+
+
+************************************************************/
+
+typedef struct _rewind_jmp_buf {
+ char *frame_base;
+ char *shadowstack_base;
+ struct _rewind_jmp_buf *prev;
+} rewind_jmp_buf;
+
+typedef struct {
+ rewind_jmp_buf *head;
+ rewind_jmp_buf *initial_head;
+ char *moved_off_base;
+ char *moved_off_ssbase;
+ struct _rewind_jmp_moved_s *moved_off;
+ void *jmpbuf[5];
+ long repeat_count;
+} rewind_jmp_thread;
+
+
+/* remember the current stack and ss_stack positions */
+#define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \
+ (rjbuf)->frame_base = __builtin_frame_address(0); \
+ (rjbuf)->shadowstack_base = (char *)(ss); \
+ (rjbuf)->prev = (rjthread)->head; \
+ (rjthread)->head = (rjbuf); \
+} while (0)
+
+/* go up one frame. if there was a setjmp call in this frame,
+ copy away the frame's stack slice before it disappears. */
+#define rewind_jmp_leaveframe(rjthread, rjbuf, ss) do { \
+ assert((rjbuf)->shadowstack_base == (char *)(ss)); \
+ (rjthread)->head = (rjbuf)->prev; \
+ if ((rjbuf)->frame_base == (rjthread)->moved_off_base) { \
+ assert((rjthread)->moved_off_ssbase == (char *)(ss));\
+ _rewind_jmp_copy_stack_slice(rjthread); \
+ } \
+} while (0)
+
+long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss);
+void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn));
+char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread);
+char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread,
+ void *callback(void *, const void *, size_t));
+
+#define rewind_jmp_forget(rjthread) do { \
+ if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \
+ (rjthread)->moved_off_base = 0; \
+ (rjthread)->moved_off_ssbase = 0; \
+} while (0)
+
+void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *);
+void _rewind_jmp_free_stack_slices(rewind_jmp_thread *);
+
+#define rewind_jmp_armed(rjthread) ((rjthread)->moved_off_base != 0)
+
+#endif
From noreply at buildbot.pypy.org Thu Aug 14 17:29:47 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 17:29:47 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Test and fix
Message-ID: <20140814152947.5524F1C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72803:7182ba694a88
Date: 2014-08-14 17:29 +0200
http://bitbucket.org/pypy/pypy/changeset/7182ba694a88/
Log: Test and fix
diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py
--- a/rpython/translator/stm/inevitable.py
+++ b/rpython/translator/stm/inevitable.py
@@ -20,6 +20,7 @@
'jit_assembler_call', 'gc_writebarrier',
'shrink_array', 'jit_stm_transaction_break_point',
'jit_stm_should_break_transaction',
+ 'threadlocalref_get', 'threadlocalref_set',
])
ALWAYS_ALLOW_OPERATIONS |= set(lloperation.enum_tryfold_ops())
diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py
--- a/rpython/translator/stm/test/test_inevitable.py
+++ b/rpython/translator/stm/test/test_inevitable.py
@@ -279,3 +279,16 @@
res = self.interpret_inevitable(f1, [])
assert res is None
+
+ def test_threadlocal(self):
+ from rpython.rlib.rthread import ThreadLocalReference
+ opaque_id = lltype.opaqueptr(ThreadLocalReference.OPAQUEID, "foobar")
+ X = lltype.GcStruct('X', ('foo', lltype.Signed))
+ def f1():
+ x = lltype.malloc(X)
+ llop.threadlocalref_set(lltype.Void, opaque_id, x)
+ y = llop.threadlocalref_get(lltype.Ptr(X), opaque_id)
+ return x == y
+
+ res = self.interpret_inevitable(f1, [])
+ assert res is None
From noreply at buildbot.pypy.org Thu Aug 14 17:32:49 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 17:32:49 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Accept set(NULL)
Message-ID: <20140814153249.5E1501C0323@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72804:0d4e673c7370
Date: 2014-08-14 17:31 +0200
http://bitbucket.org/pypy/pypy/changeset/0d4e673c7370/
Log: Accept set(NULL)
diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py
--- a/rpython/rlib/rthread.py
+++ b/rpython/rlib/rthread.py
@@ -322,8 +322,9 @@
from rpython.rlib.objectmodel import running_on_llinterp
ptr = cast_instance_to_base_ptr(value)
if not running_on_llinterp:
- gcref = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
- _make_sure_does_not_move(gcref)
+ if ptr:
+ gcref = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
+ _make_sure_does_not_move(gcref)
llop.threadlocalref_set(lltype.Void, opaque_id, ptr)
ensure_threadlocal()
else:
From noreply at buildbot.pypy.org Thu Aug 14 19:05:16 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:16 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: add parent object as
argument. forgot it for the partial trace and drag out
Message-ID: <20140814170516.331BE1D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72805:7175f6eeea87
Date: 2014-08-14 16:29 +0200
http://bitbucket.org/pypy/pypy/changeset/7175f6eeea87/
Log: add parent object as argument. forgot it for the partial trace and
drag out
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1805,7 +1805,7 @@
ll_assert(start < stop, "empty or negative range "
"in trace_and_drag_out_of_nursery_partial()")
#print 'trace_partial:', start, stop, '\t', obj
- self.trace_partial(obj, start, stop, self._trace_drag_out, llmemory.NULL)
+ self.trace_partial(obj, start, stop, self._trace_drag_out, obj)
def _trace_drag_out1(self, root):
From noreply at buildbot.pypy.org Thu Aug 14 19:05:17 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:17 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: always add parent of a
pinned object to the list of old objects pointing to
Message-ID: <20140814170517.7EF7A1D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72806:998fdf343c42
Date: 2014-08-14 16:35 +0200
http://bitbucket.org/pypy/pypy/changeset/998fdf343c42/
Log: always add parent of a pinned object to the list of old objects
pointing to pinned objects.
Forgot to think about the case where two (or more) old objects point
to the same pinned one. Each of these old objects must be in the list
as if one dies, we still have to keep the pinned object alive (and
pinned).
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1862,14 +1862,15 @@
#
elif self._is_pinned(obj):
hdr = self.header(obj)
+ # track parent of pinned object specially
+ if parent != llmemory.NULL:
+ self.old_objects_pointing_to_pinned.append(parent)
+
if hdr.tid & GCFLAG_VISITED:
# already visited and keeping track of the object
return
hdr.tid |= GCFLAG_VISITED
#
- if parent != llmemory.NULL:
- self.old_objects_pointing_to_pinned.append(parent)
- #
# XXX add additional checks for unsupported pinned objects (groggi)
ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS,
"pinned object with GCFLAG_HAS_CARDS not supported")
From noreply at buildbot.pypy.org Thu Aug 14 19:05:18 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:18 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: move
old_object_pointing_to_pinned cleanup to the end of the marking phase.
Message-ID: <20140814170518.B03EA1D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72807:249e52c74a0c
Date: 2014-08-14 16:38 +0200
http://bitbucket.org/pypy/pypy/changeset/249e52c74a0c/
Log: move old_object_pointing_to_pinned cleanup to the end of the marking
phase.
Doing it at the start of the sweeping phase just results in multiple
runs of this cleanup code. At the same time, at the end of the
marking phase we know everything we need to know to do a cleanup of
the list.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2102,19 +2102,19 @@
#objects_to_trace processed fully, can move on to sweeping
self.ac.mass_free_prepare()
self.start_free_rawmalloc_objects()
+ #
+ # get rid of objects pointing to pinned objects that were not
+ # visited
+ new_old_objects_pointing_to_pinned = self.AddressStack()
+ self.old_objects_pointing_to_pinned.foreach(
+ self._sweep_old_objects_pointing_to_pinned,
+ new_old_objects_pointing_to_pinned)
+ self.old_objects_pointing_to_pinned.delete()
+ self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned
self.gc_state = STATE_SWEEPING
#END MARKING
elif self.gc_state == STATE_SWEEPING:
#
- # get rid of objects pointing to pinned objects that were not
- # visited
- new_old_objects_pointing_to_pinned = self.AddressStack()
- self.old_objects_pointing_to_pinned.foreach(
- self._sweep_old_objects_pointing_to_pinned,
- new_old_objects_pointing_to_pinned)
- self.old_objects_pointing_to_pinned.delete()
- self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned
- #
if self.raw_malloc_might_sweep.non_empty():
# Walk all rawmalloced objects and free the ones that don't
# have the GCFLAG_VISITED flag. Visit at most 'limit' objects.
From noreply at buildbot.pypy.org Thu Aug 14 19:05:19 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:19 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: new test to check if
old_objects_pointing_to_pinned isn't growing while
Message-ID: <20140814170519.D4F411D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72808:79000e7c5466
Date: 2014-08-14 16:39 +0200
http://bitbucket.org/pypy/pypy/changeset/79000e7c5466/
Log: new test to check if old_objects_pointing_to_pinned isn't growing
while the old objects pointing to pinned objects stay the same. Fails
right now.
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py
--- a/rpython/memory/gc/test/test_object_pinning.py
+++ b/rpython/memory/gc/test/test_object_pinning.py
@@ -6,7 +6,8 @@
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('pinning_test_struct',
('someInt', lltype.Signed),
- ('next', lltype.Ptr(S))))
+ ('next', lltype.Ptr(S)),
+ ('data', lltype.Ptr(S))))
class PinningGCTest(BaseDirectGCTest):
@@ -487,6 +488,41 @@
self.pin_referenced_from_prebuilt(self.gc.collect)
+ def test_old_objects_pointing_to_pinned_not_exploading(self):
+ # scenario: two old objects, each pointing twice to a pinned object.
+ # The internal 'old_objects_pointing_to_pinned' should contain
+ # always two objects.
+ # In previous implementation the list exploded (grew with every minor
+ # collection), hence this test.
+ old1_ptr = self.malloc(S)
+ old1_ptr.someInt = 900
+ self.stackroots.append(old1_ptr)
+
+ old2_ptr = self.malloc(S)
+ old2_ptr.someInt = 800
+ self.stackroots.append(old2_ptr)
+
+ pinned_ptr = self.malloc(S)
+ pinned_ptr.someInt = 100
+ assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
+
+ self.write(old1_ptr, 'next', pinned_ptr)
+ self.write(old1_ptr, 'data', pinned_ptr)
+ self.write(old2_ptr, 'next', pinned_ptr)
+ self.write(old2_ptr, 'data', pinned_ptr)
+
+ self.gc.collect()
+ old1_ptr = self.stackroots[0]
+ old2_ptr = self.stackroots[1]
+ assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old1_ptr))
+ assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old2_ptr))
+
+ # do multiple rounds to make sure
+ for _ in range(10):
+ assert self.gc.old_objects_pointing_to_pinned.length() == 2
+ self.gc.debug_gc_step()
+
+
def pin_shadow_1(self, collect_func):
ptr = self.malloc(S)
adr = llmemory.cast_ptr_to_adr(ptr)
From noreply at buildbot.pypy.org Thu Aug 14 19:05:21 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:21 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: initial fix for
'test_old_objects_pointing_to_pinned_not_exploading'
Message-ID: <20140814170521.01D6F1D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72809:df44232936bf
Date: 2014-08-14 16:42 +0200
http://bitbucket.org/pypy/pypy/changeset/df44232936bf/
Log: initial fix for 'test_old_objects_pointing_to_pinned_not_exploading'
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1676,6 +1676,9 @@
self.nursery_free = self.nursery
self.nursery_top = self.nursery_barriers.popleft()
+ self.old_objects_pointing_to_pinned.foreach(
+ self._reset_flag_old_objects_pointing_to_pinned, None)
+
debug_print("minor collect, total memory used:",
self.get_total_memory_used())
debug_print("number of pinned objects:",
@@ -1687,6 +1690,10 @@
#
debug_stop("gc-minor")
+ def _reset_flag_old_objects_pointing_to_pinned(self, obj, ignore):
+ assert self.header(obj).tid & GCFLAG_PINNED
+ self.header(obj).tid &= ~GCFLAG_PINNED
+
def _visit_old_objects_pointing_to_pinned(self, obj, ignore):
self.trace(obj, self._trace_drag_out, obj)
@@ -1863,8 +1870,11 @@
elif self._is_pinned(obj):
hdr = self.header(obj)
# track parent of pinned object specially
- if parent != llmemory.NULL:
+ if parent != llmemory.NULL and \
+ not self.header(parent).tid & GCFLAG_PINNED:
+ #
self.old_objects_pointing_to_pinned.append(parent)
+ self.header(parent).tid |= GCFLAG_PINNED
if hdr.tid & GCFLAG_VISITED:
# already visited and keeping track of the object
From noreply at buildbot.pypy.org Thu Aug 14 19:05:22 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:22 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: new GC flag that uses
the same bit as GCFLAG_PINNED for parents pointing to pinned objects.
Message-ID: <20140814170522.2A9D91D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72810:f2e0b083a76b
Date: 2014-08-14 16:51 +0200
http://bitbucket.org/pypy/pypy/changeset/f2e0b083a76b/
Log: new GC flag that uses the same bit as GCFLAG_PINNED for parents
pointing to pinned objects.
Looks a bit cleaner than using GCFLAG_PINNED on old objects.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -138,6 +138,14 @@
# details.
GCFLAG_PINNED = first_gcflag << 9
+# The following flag is set only on objects outside the nursery
+# (i.e. old objects). Therefore we can reuse GCFLAG_PINNED as it is used for
+# the same feature (object pinning) and GCFLAG_PINNED is only used on nursery
+# objects.
+# If this flag is set, the flagged object is already an element of
+# 'old_objects_pointing_to_pinned' and doesn't have to be added again.
+GCFLAG_PINNED_OBJECT_PARENT_KNOWN = GCFLAG_PINNED
+
_GCFLAG_FIRST_UNUSED = first_gcflag << 10 # the first unused bit
@@ -1676,6 +1684,7 @@
self.nursery_free = self.nursery
self.nursery_top = self.nursery_barriers.popleft()
+ # clear GCFLAG_PINNED_OBJECT_PARENT_KNOWN from all parents in the list.
self.old_objects_pointing_to_pinned.foreach(
self._reset_flag_old_objects_pointing_to_pinned, None)
@@ -1691,8 +1700,8 @@
debug_stop("gc-minor")
def _reset_flag_old_objects_pointing_to_pinned(self, obj, ignore):
- assert self.header(obj).tid & GCFLAG_PINNED
- self.header(obj).tid &= ~GCFLAG_PINNED
+ assert self.header(obj).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN
+ self.header(obj).tid &= ~GCFLAG_PINNED_OBJECT_PARENT_KNOWN
def _visit_old_objects_pointing_to_pinned(self, obj, ignore):
self.trace(obj, self._trace_drag_out, obj)
@@ -1871,7 +1880,7 @@
hdr = self.header(obj)
# track parent of pinned object specially
if parent != llmemory.NULL and \
- not self.header(parent).tid & GCFLAG_PINNED:
+ not self.header(parent).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN:
#
self.old_objects_pointing_to_pinned.append(parent)
self.header(parent).tid |= GCFLAG_PINNED
From noreply at buildbot.pypy.org Thu Aug 14 19:05:23 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:23 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: use one const pointer to
the array instead of creating a new one each time.
Message-ID: <20140814170523.72FCF1D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72811:f7318fcee996
Date: 2014-08-14 17:05 +0200
http://bitbucket.org/pypy/pypy/changeset/f7318fcee996/
Log: use one const pointer to the array instead of creating a new one
each time.
the JIT trace looks nicer this way :-)
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py
--- a/rpython/jit/backend/llsupport/gc.py
+++ b/rpython/jit/backend/llsupport/gc.py
@@ -27,15 +27,17 @@
_ref_array_type = lltype.GcArray(llmemory.GCREF)
def __init__(self, cpu, size):
+ self._size = size
self._next_item = 0
self._ref_array = lltype.malloc(PinnedObjectTracker._ref_array_type, size)
self.ref_array_descr = cpu.arraydescrof(PinnedObjectTracker._ref_array_type)
self.ref_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self._ref_array)
+ self.const_ptr_gcref_array = ConstPtr(self.ref_array_gcref)
def add_ref(self, ref):
index = self._next_item
+ assert index < self._size
self._next_item += 1
- #
self._ref_array[index] = ref
return index
@@ -142,7 +144,7 @@
result_ptr = BoxPtr()
array_index = pinned_obj_tracker.add_ref(p)
load_op = ResOperation(rop.GETARRAYITEM_GC,
- [ConstPtr(pinned_obj_tracker.ref_array_gcref),
+ [pinned_obj_tracker.const_ptr_gcref_array,
ConstInt(array_index)],
result_ptr,
descr=pinned_obj_tracker.ref_array_descr)
From noreply at buildbot.pypy.org Thu Aug 14 19:05:24 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:24 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: add first simple JIT
test with a pinned object
Message-ID: <20140814170524.90F401D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72812:b5b57f12e441
Date: 2014-08-14 19:03 +0200
http://bitbucket.org/pypy/pypy/changeset/b5b57f12e441/
Log: add first simple JIT test with a pinned object
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -775,3 +775,32 @@
def test_compile_framework_call_assembler(self):
self.run('compile_framework_call_assembler')
+
+ def define_pinned_simple(cls):
+ from rpython.rlib.jit import promote
+ class H:
+ inst = None
+ helper = H()
+
+ @dont_look_inside
+ def get_y():
+ if not helper.inst:
+ helper.inst = X()
+ helper.inst.x = 101
+ assert rgc.pin(helper.inst)
+ else:
+ assert rgc._is_pinned(helper.inst)
+ return helper.inst
+
+ def fn(n, x, *args):
+ t = get_y()
+ promote(t)
+ t.x += 11
+ n -= 1
+ return (n, x) + args
+
+ return None, fn, None
+
+ def test_pinned_simple(self):
+ self.run('pinned_simple')
+
From noreply at buildbot.pypy.org Thu Aug 14 19:05:25 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Thu, 14 Aug 2014 19:05:25 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: fix code for the case of
no pinned objects
Message-ID: <20140814170525.BCC741D362F@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72813:4eed5d00ac19
Date: 2014-08-14 19:04 +0200
http://bitbucket.org/pypy/pypy/changeset/4eed5d00ac19/
Log: fix code for the case of no pinned objects
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py
--- a/rpython/jit/backend/llsupport/gc.py
+++ b/rpython/jit/backend/llsupport/gc.py
@@ -182,7 +182,10 @@
newnewops.extend(reops)
else:
newnewops.append(op)
- return newnewops
+ #
+ return newnewops
+ else:
+ return newops
@specialize.memo()
def getframedescrs(self, cpu):
From noreply at buildbot.pypy.org Thu Aug 14 19:50:47 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 19:50:47 +0200 (CEST)
Subject: [pypy-commit] pypy default: Add a FAQ entry
Message-ID: <20140814175047.0715B1D362E@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72814:0b7f33f969db
Date: 2014-08-14 19:50 +0200
http://bitbucket.org/pypy/pypy/changeset/0b7f33f969db/
Log: Add a FAQ entry
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -154,6 +154,17 @@
declaring that some sections of the code should run without releasing
the GIL in the middle (these are called *atomic sections* in STM).
+--------------------------------------------------
+Is PyPy more clever than CPython about Tail Calls?
+--------------------------------------------------
+
+No. PyPy follows the Python language design, including the built-in
+debugger features. `This prevents tail calls.`__ Neither the JIT
+nor Stackless__ change anything to that.
+
+.. __: http://neopythonic.blogspot.com.au/2009/04/final-words-on-tail-calls.html
+.. __: stackless.html
+
------------------------------------------
How do I write extension modules for PyPy?
------------------------------------------
From noreply at buildbot.pypy.org Thu Aug 14 20:01:36 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 20:01:36 +0200 (CEST)
Subject: [pypy-commit] pypy default: Update the FAQ entry
Message-ID: <20140814180136.9B1E91D362E@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72815:0610ca82473d
Date: 2014-08-14 20:00 +0200
http://bitbucket.org/pypy/pypy/changeset/0610ca82473d/
Log: Update the FAQ entry
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -159,10 +159,12 @@
--------------------------------------------------
No. PyPy follows the Python language design, including the built-in
-debugger features. `This prevents tail calls.`__ Neither the JIT
-nor Stackless__ change anything to that.
+debugger features. This prevents tail calls, as summarized by Guido
+van Rossum in two__ blog__ posts. Moreover, neither the JIT nor
+Stackless__ change anything to that.
-.. __: http://neopythonic.blogspot.com.au/2009/04/final-words-on-tail-calls.html
+.. __: http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html
+.. __: http://neopythonic.blogspot.com/2009/04/final-words-on-tail-calls.html
.. __: stackless.html
------------------------------------------
From noreply at buildbot.pypy.org Thu Aug 14 20:06:00 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 14 Aug 2014 20:06:00 +0200 (CEST)
Subject: [pypy-commit] pypy default: Update this FAQ entry
Message-ID: <20140814180600.3DF8E1D3633@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72816:87622de1d682
Date: 2014-08-14 20:05 +0200
http://bitbucket.org/pypy/pypy/changeset/87622de1d682/
Log: Update this FAQ entry
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -147,12 +147,12 @@
programmer).
Instead, since 2012, there is work going on on a still very experimental
-Software Transactional Memory (STM) version of PyPy. This should give
-an alternative PyPy which internally has no GIL, while at the same time
+`Software Transactional Memory`_ (STM) version of PyPy. This should give
+an alternative PyPy which works without a GIL, while at the same time
continuing to give the Python programmer the complete illusion of having
-one. It would in fact push forward *more* GIL-ish behavior, like
-declaring that some sections of the code should run without releasing
-the GIL in the middle (these are called *atomic sections* in STM).
+one.
+
+.. _`Software Transactional Memory`: stm.html
--------------------------------------------------
Is PyPy more clever than CPython about Tail Calls?
From noreply at buildbot.pypy.org Fri Aug 15 11:35:50 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Fri, 15 Aug 2014 11:35:50 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: JIT test to check if
unpinning an object works
Message-ID: <20140815093550.E3DEA1C0EC8@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72817:a2d0b17c975c
Date: 2014-08-15 11:34 +0200
http://bitbucket.org/pypy/pypy/changeset/a2d0b17c975c/
Log: JIT test to check if unpinning an object works
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -804,3 +804,48 @@
def test_pinned_simple(self):
self.run('pinned_simple')
+ def define_pinned_unpin(cls):
+ from rpython.rlib.jit import promote
+ class H:
+ inst = None
+ pinned = False
+ count_pinned = 0
+ count_unpinned = 0
+ helper = H()
+
+ @dont_look_inside
+ def get_y(n):
+ if not helper.inst:
+ helper.inst = X()
+ helper.inst.x = 101
+ helper.pinned = True
+ assert rgc.pin(helper.inst)
+ elif n < 100 and helper.pinned:
+ rgc.unpin(helper.inst)
+ helper.pinned = False
+ #
+ if helper.pinned:
+ assert rgc._is_pinned(helper.inst)
+ helper.count_pinned += 1
+ else:
+ assert not rgc._is_pinned(helper.inst)
+ helper.count_unpinned += 1
+ return helper.inst
+
+ def fn(n, x, *args):
+ t = get_y(n)
+ promote(t)
+ assert t.x == 101
+ n -= 1
+ return (n, x) + args
+
+ def after(n, x, *args):
+ assert helper.count_pinned > 0
+ assert helper.count_unpinned > 0
+ assert not helper.pinned
+
+ return None, fn, after
+
+ def test_pinned_unpin(self):
+ self.run('pinned_unpin')
+
From noreply at buildbot.pypy.org Fri Aug 15 13:17:55 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Fri, 15 Aug 2014 13:17:55 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: consistency: use check()
instead of assert
Message-ID: <20140815111755.3F4851C0157@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72818:54527e0ff202
Date: 2014-08-15 13:16 +0200
http://bitbucket.org/pypy/pypy/changeset/54527e0ff202/
Log: consistency: use check() instead of assert
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -787,9 +787,9 @@
if not helper.inst:
helper.inst = X()
helper.inst.x = 101
- assert rgc.pin(helper.inst)
+ check(rgc.pin(helper.inst))
else:
- assert rgc._is_pinned(helper.inst)
+ check(rgc._is_pinned(helper.inst))
return helper.inst
def fn(n, x, *args):
@@ -819,30 +819,30 @@
helper.inst = X()
helper.inst.x = 101
helper.pinned = True
- assert rgc.pin(helper.inst)
+ check(rgc.pin(helper.inst))
elif n < 100 and helper.pinned:
rgc.unpin(helper.inst)
helper.pinned = False
#
if helper.pinned:
- assert rgc._is_pinned(helper.inst)
+ check(rgc._is_pinned(helper.inst))
helper.count_pinned += 1
else:
- assert not rgc._is_pinned(helper.inst)
+ check(not rgc._is_pinned(helper.inst))
helper.count_unpinned += 1
return helper.inst
def fn(n, x, *args):
t = get_y(n)
promote(t)
- assert t.x == 101
+ check(t.x == 101)
n -= 1
return (n, x) + args
def after(n, x, *args):
- assert helper.count_pinned > 0
- assert helper.count_unpinned > 0
- assert not helper.pinned
+ check(helper.count_pinned > 0)
+ check(helper.count_unpinned > 0)
+ check(not helper.pinned)
return None, fn, after
From noreply at buildbot.pypy.org Fri Aug 15 19:36:36 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Fri, 15 Aug 2014 19:36:36 +0200 (CEST)
Subject: [pypy-commit] pypy default: Oops,
thanks gregor_w for noticing that the test was not testing anything.
Message-ID: <20140815173636.DE26B1C0226@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72819:39bb9189ac28
Date: 2014-08-15 19:25 +0200
http://bitbucket.org/pypy/pypy/changeset/39bb9189ac28/
Log: Oops, thanks gregor_w for noticing that the test was not testing
anything. Make it do so, and fix it.
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -633,9 +633,9 @@
return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
- check(x.x == 1800 * 2 + 1850 * 2 + 200 - 150)
+ check(x.x == 1800 * 2 + 150 * 2 + 200 - 1850)
- return before, f, None
+ return before, f, after
def test_compile_framework_external_exception_handling(self):
self.run('compile_framework_external_exception_handling')
From noreply at buildbot.pypy.org Sat Aug 16 03:04:24 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 03:04:24 +0200 (CEST)
Subject: [pypy-commit] pypy default: py3 compat
Message-ID: <20140816010424.5B9C51C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch:
Changeset: r72820:0efde1cfee32
Date: 2014-08-15 17:58 -0700
http://bitbucket.org/pypy/pypy/changeset/0efde1cfee32/
Log: py3 compat
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py
--- a/pypy/interpreter/test/test_generator.py
+++ b/pypy/interpreter/test/test_generator.py
@@ -288,9 +288,9 @@
yield 5
raise # should raise "no active exception to re-raise"
gen = f()
- gen.next() # --> 5
+ next(gen) # --> 5
try:
- gen.next()
+ next(gen)
except TypeError:
pass
From noreply at buildbot.pypy.org Sat Aug 16 03:04:26 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 03:04:26 +0200 (CEST)
Subject: [pypy-commit] pypy py3k: merge default
Message-ID: <20140816010426.0D22F1C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3k
Changeset: r72821:57f83c7b11fc
Date: 2014-08-15 17:58 -0700
http://bitbucket.org/pypy/pypy/changeset/57f83c7b11fc/
Log: merge default
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -147,12 +147,25 @@
programmer).
Instead, since 2012, there is work going on on a still very experimental
-Software Transactional Memory (STM) version of PyPy. This should give
-an alternative PyPy which internally has no GIL, while at the same time
+`Software Transactional Memory`_ (STM) version of PyPy. This should give
+an alternative PyPy which works without a GIL, while at the same time
continuing to give the Python programmer the complete illusion of having
-one. It would in fact push forward *more* GIL-ish behavior, like
-declaring that some sections of the code should run without releasing
-the GIL in the middle (these are called *atomic sections* in STM).
+one.
+
+.. _`Software Transactional Memory`: stm.html
+
+--------------------------------------------------
+Is PyPy more clever than CPython about Tail Calls?
+--------------------------------------------------
+
+No. PyPy follows the Python language design, including the built-in
+debugger features. This prevents tail calls, as summarized by Guido
+van Rossum in two__ blog__ posts. Moreover, neither the JIT nor
+Stackless__ change anything to that.
+
+.. __: http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html
+.. __: http://neopythonic.blogspot.com/2009/04/final-words-on-tail-calls.html
+.. __: stackless.html
------------------------------------------
How do I write extension modules for PyPy?
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py
--- a/pypy/interpreter/test/test_generator.py
+++ b/pypy/interpreter/test/test_generator.py
@@ -309,9 +309,9 @@
yield 5
raise # should raise "no active exception to re-raise"
gen = f()
- gen.next() # --> 5
+ next(gen) # --> 5
try:
- gen.next()
+ next(gen)
except TypeError:
pass
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -633,9 +633,9 @@
return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
- check(x.x == 1800 * 2 + 1850 * 2 + 200 - 150)
+ check(x.x == 1800 * 2 + 150 * 2 + 200 - 1850)
- return before, f, None
+ return before, f, after
def test_compile_framework_external_exception_handling(self):
self.run('compile_framework_external_exception_handling')
diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py
--- a/rpython/rtyper/normalizecalls.py
+++ b/rpython/rtyper/normalizecalls.py
@@ -62,6 +62,8 @@
msg.append("the following functions:")
msg.append(" %s" % ("\n ".join(pfg), ))
msg.append("are called with inconsistent numbers of arguments")
+ msg.append("(and/or the argument names are different, which is"
+ " not supported in this case)")
if shape1[0] != shape2[0]:
msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0]))
else:
diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py
--- a/rpython/rtyper/test/test_normalizecalls.py
+++ b/rpython/rtyper/test/test_normalizecalls.py
@@ -185,6 +185,7 @@
.+Sub1.fn
.+Sub2.fn
are called with inconsistent numbers of arguments
+\(and/or the argument names are different, which is not supported in this case\)
sometimes with \d arguments, sometimes with \d
the callers of these functions are:
.+otherfunc
From noreply at buildbot.pypy.org Sat Aug 16 03:04:27 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 03:04:27 +0200 (CEST)
Subject: [pypy-commit] pypy py3k: fix preserving the exception state between
generator yields for the 3rd time
Message-ID: <20140816010427.50C511C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3k
Changeset: r72822:7ca938cad6ed
Date: 2014-08-15 18:01 -0700
http://bitbucket.org/pypy/pypy/changeset/7ca938cad6ed/
Log: fix preserving the exception state between generator yields for the
3rd time
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -63,6 +63,8 @@
try:
while True:
next_instr = self.handle_bytecode(co_code, next_instr, ec)
+ except Yield:
+ return self.popvalue()
except ExitFrame:
self.last_exception = None
return self.popvalue()
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py
--- a/pypy/interpreter/test/test_generator.py
+++ b/pypy/interpreter/test/test_generator.py
@@ -307,12 +307,12 @@
foobar
except NameError:
yield 5
- raise # should raise "no active exception to re-raise"
+ raise
gen = f()
next(gen) # --> 5
try:
next(gen)
- except TypeError:
+ except NameError:
pass
From noreply at buildbot.pypy.org Sat Aug 16 03:04:28 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 03:04:28 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: merge py3k
Message-ID: <20140816010428.8F2DC1C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72823:1d47dd6f19e4
Date: 2014-08-15 18:02 -0700
http://bitbucket.org/pypy/pypy/changeset/1d47dd6f19e4/
Log: merge py3k
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -147,12 +147,25 @@
programmer).
Instead, since 2012, there is work going on on a still very experimental
-Software Transactional Memory (STM) version of PyPy. This should give
-an alternative PyPy which internally has no GIL, while at the same time
+`Software Transactional Memory`_ (STM) version of PyPy. This should give
+an alternative PyPy which works without a GIL, while at the same time
continuing to give the Python programmer the complete illusion of having
-one. It would in fact push forward *more* GIL-ish behavior, like
-declaring that some sections of the code should run without releasing
-the GIL in the middle (these are called *atomic sections* in STM).
+one.
+
+.. _`Software Transactional Memory`: stm.html
+
+--------------------------------------------------
+Is PyPy more clever than CPython about Tail Calls?
+--------------------------------------------------
+
+No. PyPy follows the Python language design, including the built-in
+debugger features. This prevents tail calls, as summarized by Guido
+van Rossum in two__ blog__ posts. Moreover, neither the JIT nor
+Stackless__ change anything to that.
+
+.. __: http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html
+.. __: http://neopythonic.blogspot.com/2009/04/final-words-on-tail-calls.html
+.. __: stackless.html
------------------------------------------
How do I write extension modules for PyPy?
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -63,6 +63,8 @@
try:
while True:
next_instr = self.handle_bytecode(co_code, next_instr, ec)
+ except Yield:
+ return self.popvalue()
except ExitFrame:
self.last_exception = None
return self.popvalue()
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py
--- a/pypy/interpreter/test/test_generator.py
+++ b/pypy/interpreter/test/test_generator.py
@@ -307,12 +307,12 @@
foobar
except NameError:
yield 5
- raise # should raise "no active exception to re-raise"
+ raise
gen = f()
- gen.next() # --> 5
+ next(gen) # --> 5
try:
- gen.next()
- except TypeError:
+ next(gen)
+ except NameError:
pass
def test_yield_return(self):
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -633,9 +633,9 @@
return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
- check(x.x == 1800 * 2 + 1850 * 2 + 200 - 150)
+ check(x.x == 1800 * 2 + 150 * 2 + 200 - 1850)
- return before, f, None
+ return before, f, after
def test_compile_framework_external_exception_handling(self):
self.run('compile_framework_external_exception_handling')
diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py
--- a/rpython/rtyper/normalizecalls.py
+++ b/rpython/rtyper/normalizecalls.py
@@ -62,6 +62,8 @@
msg.append("the following functions:")
msg.append(" %s" % ("\n ".join(pfg), ))
msg.append("are called with inconsistent numbers of arguments")
+ msg.append("(and/or the argument names are different, which is"
+ " not supported in this case)")
if shape1[0] != shape2[0]:
msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0]))
else:
diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py
--- a/rpython/rtyper/test/test_normalizecalls.py
+++ b/rpython/rtyper/test/test_normalizecalls.py
@@ -185,6 +185,7 @@
.+Sub1.fn
.+Sub2.fn
are called with inconsistent numbers of arguments
+\(and/or the argument names are different, which is not supported in this case\)
sometimes with \d arguments, sometimes with \d
the callers of these functions are:
.+otherfunc
From noreply at buildbot.pypy.org Sat Aug 16 16:39:48 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sat, 16 Aug 2014 16:39:48 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Minimal set of changes to
pass targetdemo2 while moving away from the
Message-ID: <20140816143948.6AE541C0157@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72824:459042b5f19d
Date: 2014-08-16 16:39 +0200
http://bitbucket.org/pypy/pypy/changeset/459042b5f19d/
Log: Minimal set of changes to pass targetdemo2 while moving away from
the stm_perform_*() model
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -18,7 +18,6 @@
from pypy.interpreter.nestedscope import Cell
from pypy.interpreter.pycode import PyCode, BytecodeCorruption
from pypy.tool.stdlib_opcode import bytecode_spec
-from rpython.rlib.jit import we_are_jitted
def unaryoperation(operationname):
"""NOT_RPYTHON"""
@@ -44,14 +43,6 @@
return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)
-# ____________________________________________________________
-
-stmonly_jitdriver = jit.JitDriver(greens=[], reds=['next_instr', 'ec',
- 'self', 'co_code'],
- stm_do_transaction_breaks=True)
-
-# ____________________________________________________________
-
opcodedesc = bytecode_spec.opcodedesc
HAVE_ARGUMENT = bytecode_spec.HAVE_ARGUMENT
@@ -65,13 +56,8 @@
# For the sequel, force 'next_instr' to be unsigned for performance
next_instr = r_uint(next_instr)
co_code = pycode.co_code
+ rstm.rewind_jmp_frame()
while True:
- if self.space.config.translation.stm:
- # only used for no-jit. The jit-jitdriver is
- # in interp_jit.py
- stmonly_jitdriver.jit_merge_point(
- self=self, co_code=co_code,
- next_instr=next_instr, ec=ec)
rstm.push_marker(intmask(next_instr) * 2 + 1, self.pycode)
try:
next_instr = self.handle_bytecode(co_code, next_instr, ec)
@@ -165,6 +151,7 @@
ec.bytecode_only_trace(self)
else:
ec.bytecode_trace(self)
+ rstm.possible_transaction_break()
next_instr = r_uint(self.last_instr)
opcode = ord(co_code[next_instr])
next_instr += 1
@@ -199,7 +186,7 @@
else:
unroller = SReturnValue(w_returnvalue)
next_instr = block.handle(self, unroller)
- # now inside a 'finally' block
+ return next_instr # now inside a 'finally' block
elif opcode == opcodedesc.END_FINALLY.index:
unroller = self.end_finally()
if isinstance(unroller, SuspendedUnroller):
@@ -211,12 +198,13 @@
raise Return
else:
next_instr = block.handle(self, unroller)
+ return next_instr
elif opcode == opcodedesc.JUMP_ABSOLUTE.index:
return self.jump_absolute(oparg, ec)
elif opcode == opcodedesc.BREAK_LOOP.index:
next_instr = self.BREAK_LOOP(oparg, next_instr)
elif opcode == opcodedesc.CONTINUE_LOOP.index:
- next_instr = self.CONTINUE_LOOP(oparg, next_instr)
+ return self.CONTINUE_LOOP(oparg, next_instr)
elif opcode == opcodedesc.FOR_ITER.index:
next_instr = self.FOR_ITER(oparg, next_instr)
elif opcode == opcodedesc.JUMP_FORWARD.index:
@@ -457,22 +445,6 @@
if jit.we_are_jitted():
return next_instr
- if self.space.config.translation.stm:
- # with STM, if should_break_transaction(), then it is a good
- # idea to leave and let _dispatch_stm_breaking_transaction()
- # break the transaction. But avoid doing it if we are in a
- # tail-call position: if the next opcode is RETURN_VALUE, or
- # one of the opcodes in the one of the sequences
- # * POP_TOP/LOAD_CONST/RETURN_VALUE
- # * POP_TOP/LOAD_FAST/RETURN_VALUE
- if rstm.should_break_transaction():
- opcode = ord(co_code[next_instr])
- if opcode not in (opcodedesc.RETURN_VALUE.index,
- opcodedesc.POP_TOP.index,
- opcodedesc.LOAD_CONST.index,
- opcodedesc.LOAD_FAST.index):
- return next_instr
-
rstm.update_marker_num(intmask(next_instr) * 2 + 1)
@jit.unroll_safe
diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -483,16 +483,12 @@
name = 'jitdriver'
inline_jit_merge_point = False
_store_last_enter_jit = None
- stm_do_transaction_breaks = False
- stm_report_location = None
def __init__(self, greens=None, reds=None, virtualizables=None,
get_jitcell_at=None, set_jitcell_at=None,
get_printable_location=None, confirm_enter_jit=None,
can_never_inline=None, should_unroll_one_iteration=None,
- name='jitdriver', check_untranslated=True,
- stm_do_transaction_breaks=None,
- stm_report_location=None):
+ name='jitdriver', check_untranslated=True):
if greens is not None:
self.greens = greens
self.name = name
@@ -528,10 +524,6 @@
self.can_never_inline = can_never_inline
self.should_unroll_one_iteration = should_unroll_one_iteration
self.check_untranslated = check_untranslated
- if stm_do_transaction_breaks is not None:
- self.stm_do_transaction_breaks = stm_do_transaction_breaks
- if stm_report_location is not None:
- self.stm_report_location = stm_report_location
def _freeze_(self):
return True
@@ -826,6 +818,9 @@
v_red = hop.inputarg(r_red, arg=i)
reds_v.append(v_red)
hop.exception_cannot_occur()
+ if self.instance.__name__ == 'jit_merge_point':
+ if hop.rtyper.annotator.translator.config.translation.stm:
+ hop.genop('stm_rewind_jmp_frame', [], resulttype=lltype.Void)
vlist = [hop.inputconst(lltype.Void, self.instance.__name__),
hop.inputconst(lltype.Void, driver)]
vlist.extend(greens_v)
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -1,5 +1,6 @@
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.rlib.objectmodel import CDefinedIntSymbolic
+from rpython.rlib.rgc import stm_is_enabled
from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.extregistry import ExtRegistryEntry
@@ -40,6 +41,18 @@
CFlexSymbolic('((long)&pypy_stm_start_transaction)'))
+def rewind_jmp_frame():
+ """At some key places, like the entry point of the thread and in the
+ function with the interpreter's dispatch loop, this must be called
+ (it turns into a marker in the caller's function). There is one
+ automatically in any jit.jit_merge_point()."""
+ # special-cased below
+
+def possible_transaction_break():
+ if stm_is_enabled():
+ if llop.stm_should_break_transaction(lltype.Bool):
+ llop.stm_transaction_break(lltype.Void)
+
def jit_stm_transaction_break_point():
# XXX REFACTOR AWAY
if we_are_translated():
@@ -77,6 +90,10 @@
llop.stm_should_break_transaction(lltype.Bool))
@dont_look_inside
+def break_transaction():
+ llop.stm_break_transaction(lltype.Void)
+
+ at dont_look_inside
def set_transaction_length(fraction):
llop.stm_set_transaction_length(lltype.Void, float(fraction))
@@ -161,26 +178,13 @@
# ____________________________________________________________
-def make_perform_transaction(func, CONTAINERP):
- from rpython.rtyper.annlowlevel import llhelper
- from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr
- from rpython.translator.stm.stmgcintf import CALLBACK_TX
- #
- def _stm_callback(llcontainer, retry_counter):
- llcontainer = rffi.cast(CONTAINERP, llcontainer)
- retry_counter = rffi.cast(lltype.Signed, retry_counter)
- try:
- res = func(llcontainer, retry_counter)
- except Exception, e:
- res = 0 # ends perform_transaction() and returns
- lle = cast_instance_to_base_ptr(e)
- llcontainer.got_exception = lle
- return rffi.cast(rffi.INT_real, res)
- #
- @dont_look_inside
- def perform_transaction(llcontainer):
- llcallback = llhelper(CALLBACK_TX, _stm_callback)
- llop.stm_perform_transaction(lltype.Void, llcontainer, llcallback)
- perform_transaction._transaction_break_ = True
- #
- return perform_transaction
+class _Entry(ExtRegistryEntry):
+ _about_ = rewind_jmp_frame
+
+ def compute_result_annotation(self):
+ pass
+
+ def specialize_call(self, hop):
+ hop.exception_cannot_occur()
+ if hop.rtyper.annotator.translator.config.translation.stm:
+ hop.genop('stm_rewind_jmp_frame', [], resulttype=lltype.Void)
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -434,8 +434,9 @@
'stm_abort_and_retry': LLOp(canmallocgc=True),
'stm_enter_callback_call': LLOp(canmallocgc=True),
'stm_leave_callback_call': LLOp(),
- 'stm_perform_transaction': LLOp(canmallocgc=True),
+ 'stm_transaction_break': LLOp(canmallocgc=True),
'stm_should_break_transaction': LLOp(sideeffects=False),
+ 'stm_rewind_jmp_frame': LLOp(),
'stm_set_transaction_length': LLOp(),
'stm_hint_commit_soon': LLOp(canrun=True),
diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py
--- a/rpython/rtyper/lltypesystem/rffi.py
+++ b/rpython/rtyper/lltypesystem/rffi.py
@@ -318,8 +318,11 @@
if aroundstate is not None:
if aroundstate.enter_callback is not None:
token = aroundstate.enter_callback()
- elif aroundstate.after is not None:
- aroundstate.after()
+ llop.stm_rewind_jmp_frame(lltype.Void, 1)
+ else:
+ after = aroundstate.after
+ if after is not None:
+ after()
# from now on we hold the GIL
stackcounter.stacks_counter += 1
llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py
@@ -336,9 +339,12 @@
stackcounter.stacks_counter -= 1
if aroundstate is not None:
if aroundstate.leave_callback is not None:
+ llop.stm_rewind_jmp_frame(lltype.Void, 2)
aroundstate.leave_callback(token)
- elif aroundstate.before is not None:
- aroundstate.before()
+ else:
+ before = aroundstate.before
+ if before is not None:
+ before()
# here we don't hold the GIL any more. As in the wrapper() produced
# by llexternal, it is essential that no exception checking occurs
# after the call to before().
diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py
--- a/rpython/translator/backendopt/gilanalysis.py
+++ b/rpython/translator/backendopt/gilanalysis.py
@@ -21,13 +21,11 @@
self, graph, seen)
def analyze_external_call(self, op, seen=None):
- funcobj = op.args[0].value._obj
- if getattr(funcobj, 'transactionsafe', False):
- return False
- else:
- return False
-
+ return False
+
def analyze_simple_operation(self, op, graphinfo):
+ if op.opname == 'stm_break_transaction':
+ return True
return False
def analyze(graphs, translator):
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -1,5 +1,4 @@
import sys
-from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring
from rpython.translator.c.support import cdecl
from rpython.translator.c.support import llvalue_from_constant, gen_assignments
from rpython.translator.c.support import c_string_constant, barebonearray
@@ -24,17 +23,7 @@
Collects information about a function which we have to generate
from a flow graph.
"""
-
- if USESLOTS:
- __slots__ = """graph db gcpolicy
- exception_policy
- more_ll_values
- vars all_cached_consts
- illtypes
- functionname
- blocknum
- innerloops
- oldgraph""".split()
+ use_stm_rewind_jmp_frame = False
def __init__(self, graph, db, exception_policy=None, functionname=None):
graph._seen_by_the_backend = True
@@ -75,6 +64,11 @@
for block in self.graph.iterblocks():
mix.extend(block.inputargs)
for op in block.operations:
+ if op.opname == 'stm_rewind_jmp_frame':
+ if len(op.args) == 0:
+ self.use_stm_rewind_jmp_frame = "automatic"
+ elif not self.use_stm_rewind_jmp_frame:
+ self.use_stm_rewind_jmp_frame = True
mix.extend(op.args)
mix.append(op.result)
for link in block.exits:
@@ -203,6 +197,11 @@
# ____________________________________________________________
def cfunction_body(self):
+ if self.use_stm_rewind_jmp_frame:
+ yield 'rewind_jmp_buf rjbuf1;'
+ if self.use_stm_rewind_jmp_frame == "automatic":
+ yield 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);'
+ #
graph = self.graph
yield 'goto block0;' # to avoid a warning "this label is not used"
@@ -221,6 +220,9 @@
if len(block.exits) == 0:
assert len(block.inputargs) == 1
# regular return block
+ if self.use_stm_rewind_jmp_frame == "automatic":
+ yield ('stm_rewind_jmp_leaveframe('
+ '&stm_thread_local, &rjbuf1);')
retval = self.expr(block.inputargs[0])
if self.exception_policy != "exc_helper":
yield 'RPY_DEBUG_RETURN();'
@@ -920,5 +922,3 @@
self.expr(op.args[0]))
else:
return None # use the default
-
-assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator)
diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c
--- a/rpython/translator/c/src/entrypoint.c
+++ b/rpython/translator/c/src/entrypoint.c
@@ -49,6 +49,11 @@
errmsg = RPython_StartupCode();
if (errmsg) goto error;
+#ifdef RPY_STM
+ rewind_jmp_buf rjbuf;
+ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
+#endif
+
exitcode = STANDALONE_ENTRY_POINT(argc, argv);
pypy_debug_alloc_results();
@@ -60,6 +65,10 @@
pypy_malloc_counters_results();
+#ifdef RPY_STM
+ stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
+#endif
+
RPython_TeardownCode();
return exitcode;
diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py
--- a/rpython/translator/stm/funcgen.py
+++ b/rpython/translator/stm/funcgen.py
@@ -175,11 +175,8 @@
arg0 = funcgen.expr(op.args[0])
return 'pypy_stm_set_transaction_length(%s);' % (arg0,)
-def stm_perform_transaction(funcgen, op):
- arg0 = funcgen.expr(op.args[0])
- arg1 = funcgen.expr(op.args[1])
- return ('pypy_stm_perform_transaction((object_t *)%s, '
- '(int(*)(object_t *, int))%s);' % (arg0, arg1))
+def stm_transaction_break(funcgen, op):
+ return 'pypy_stm_transaction_break();'
def stm_increment_atomic(funcgen, op):
return 'pypy_stm_increment_atomic();'
@@ -259,3 +256,11 @@
'stm_thread_local.longest_marker_time = 0.0;\n'
'stm_thread_local.longest_marker_self[0] = 0;\n'
'stm_thread_local.longest_marker_other[0] = 0;')
+
+def stm_rewind_jmp_frame(funcgen, op):
+ if len(op.args) == 0:
+ return '/* automatic stm_rewind_jmp_frame */'
+ elif op.args[0].value == 1:
+ return 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);'
+ else:
+ return 'stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf1);'
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c
--- a/rpython/translator/stm/src_stm/stmgcintf.c
+++ b/rpython/translator/stm/src_stm/stmgcintf.c
@@ -167,55 +167,11 @@
return counter;
}
-void pypy_stm_perform_transaction(object_t *arg, int callback(object_t *, int))
-{ /* must save roots around this call */
- //
- // XXX this function should be killed! We no longer need a
- // callback-based approach at all.
-
-#ifndef NDEBUG
- struct stm_shadowentry_s *volatile v_old_shadowstack =
- stm_thread_local.shadowstack;
-#endif
- rewind_jmp_buf rjbuf;
- stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
- //STM_PUSH_ROOT(stm_thread_local, STM_STACK_MARKER_NEW);
- STM_PUSH_ROOT(stm_thread_local, arg);
-
- while (1) {
- long counter;
- if (pypy_stm_should_break_transaction()) { //pypy_stm_ready_atomic == 1) {
- /* Not in an atomic transaction; but it might be an inevitable
- transaction.
- */
- assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1);
-
- stm_commit_transaction();
-
- counter = _pypy_stm_start_transaction();
- }
- else {
- /* In an atomic transaction */
- //assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1);
- counter = 0;
- }
-
- /* invoke the callback in the new transaction */
- STM_POP_ROOT(stm_thread_local, arg);
- assert(v_old_shadowstack == stm_thread_local.shadowstack);// - 1);
- STM_PUSH_ROOT(stm_thread_local, arg);
-
- long result = callback(arg, counter);
- if (result <= 0)
- break;
- }
-
- STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */
- //uintptr_t x = (uintptr_t)STM_POP_ROOT_RET(stm_thread_local);
- //assert(x == STM_STACK_MARKER_NEW || x == STM_STACK_MARKER_OLD);
- assert(v_old_shadowstack == stm_thread_local.shadowstack);
-
- stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
+void pypy_stm_transaction_break(void)
+{
+ assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1);
+ stm_commit_transaction();
+ _pypy_stm_start_transaction();
}
void _pypy_stm_inev_state(void)
diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h
--- a/rpython/translator/stm/src_stm/stmgcintf.h
+++ b/rpython/translator/stm/src_stm/stmgcintf.h
@@ -92,7 +92,7 @@
long pypy_stm_enter_callback_call(void);
void pypy_stm_leave_callback_call(long);
void pypy_stm_set_transaction_length(double);
-void pypy_stm_perform_transaction(object_t *, int(object_t *, int));//XXX
+void pypy_stm_transaction_break(void);
static inline int pypy_stm_should_break_transaction(void)
{
diff --git a/rpython/translator/stm/test/targetdemo2.py b/rpython/translator/stm/test/targetdemo2.py
--- a/rpython/translator/stm/test/targetdemo2.py
+++ b/rpython/translator/stm/test/targetdemo2.py
@@ -1,6 +1,6 @@
import time
from rpython.rlib import rthread
-from rpython.rlib import rstm, jit
+from rpython.rlib import rstm
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.objectmodel import compute_identity_hash
from rpython.rlib.debug import ll_assert
@@ -65,12 +65,6 @@
print "check ok!"
-jitdriver_hash = jit.JitDriver(greens=[], reds=['value', 'self'])
-jitdriver_inev = jit.JitDriver(greens=[], reds=['value', 'self'])
-jitdriver_ptreq = jit.JitDriver(greens=[], reds=['self'])
-jitdriver_really = jit.JitDriver(greens=[], reds=['value', 'self'])
-
-
class ThreadRunner(object):
arg = None
@@ -94,7 +88,7 @@
def do_run_really(self):
value = 0
while True:
- jitdriver_really.jit_merge_point(self=self, value=value)
+ rstm.possible_transaction_break()
if not self.run_really(value):
break
value += 1
@@ -115,7 +109,7 @@
return (value+1) < glob.LENGTH
def do_check_ptr_equality(self):
- jitdriver_ptreq.jit_merge_point(self=self)
+ rstm.possible_transaction_break()
self.check_ptr_equality(0)
def check_ptr_equality(self, foo):
@@ -129,7 +123,7 @@
def do_check_inev(self):
value = 0
while True:
- jitdriver_inev.jit_merge_point(self=self, value=value)
+ rstm.possible_transaction_break()
if not self.check_inev(value):
break
value += 1
@@ -157,7 +151,7 @@
def do_check_hash(self):
value = 0
while True:
- jitdriver_hash.jit_merge_point(self=self, value=value)
+ rstm.possible_transaction_break()
value = self.check_hash(value)
if value >= glob.LENGTH:
break
From noreply at buildbot.pypy.org Sat Aug 16 18:47:27 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Sat, 16 Aug 2014 18:47:27 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: if the same pointer is
used multiple times, only use one array element.
Message-ID: <20140816164727.AC1561C0157@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72825:79cdf621b8fb
Date: 2014-08-16 15:58 +0200
http://bitbucket.org/pypy/pypy/changeset/79cdf621b8fb/
Log: if the same pointer is used multiple times, only use one array
element.
Implements the optimisation that was pointed to in commit
5dcc35cb8954a4ce373804707a6745c8adb3487c
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py
--- a/rpython/jit/backend/llsupport/gc.py
+++ b/rpython/jit/backend/llsupport/gc.py
@@ -18,6 +18,7 @@
from rpython.jit.backend.llsupport.descr import get_call_descr
from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler
from rpython.memory.gctransform import asmgcroot
+from rpython.rtyper.lltypesystem import llmemory
class PinnedObjectTracker(object):
"""Simple helper class to keep informations regarding the 'GcArray'
@@ -26,21 +27,26 @@
_ref_array_type = lltype.GcArray(llmemory.GCREF)
- def __init__(self, cpu, size):
- self._size = size
- self._next_item = 0
+ def __init__(self, cpu, pointers):
+ # prepare GC array to hold the pointers
+ size = len(pointers)
self._ref_array = lltype.malloc(PinnedObjectTracker._ref_array_type, size)
self.ref_array_descr = cpu.arraydescrof(PinnedObjectTracker._ref_array_type)
self.ref_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self._ref_array)
self.const_ptr_gcref_array = ConstPtr(self.ref_array_gcref)
+ #
+ # assign each pointer an index and put the pointer into the GC array
+ self._indexes = {}
+ for index in range(len(pointers)):
+ ptr = pointers[index]
+ self._indexes[llmemory.cast_ptr_to_adr(ptr)] = llmemory.cast_int_to_adr(index)
+ self._ref_array[index] = ptr
- def add_ref(self, ref):
- index = self._next_item
- assert index < self._size
- self._next_item += 1
- self._ref_array[index] = ref
+ def add_ref(self, ptr):
+ assert llmemory.cast_ptr_to_adr(ptr) in self._indexes
+ index = llmemory.cast_adr_to_int(self._indexes[llmemory.cast_ptr_to_adr(ptr)])
+ assert ptr == self._ref_array[index]
return index
-
# ____________________________________________________________
class GcLLDescription(GcCache):
@@ -114,7 +120,8 @@
def gc_malloc_unicode(self, num_elem):
return self._bh_malloc_array(num_elem, self.unicode_descr)
- def _record_constptrs(self, op, gcrefs_output_list, moving_output_list):
+ def _record_constptrs(self, op, gcrefs_output_list, moving_output_list,
+ known_pointers):
moving_output_list[op] = []
for i in range(op.numargs()):
v = op.getarg(i)
@@ -124,6 +131,8 @@
gcrefs_output_list.append(p)
else:
moving_output_list[op].append(i)
+ if p not in known_pointers:
+ known_pointers.append(p)
#
if op.is_guard() or op.getopnum() == rop.FINISH:
llref = cast_instance_to_gcref(op.getdescr())
@@ -165,11 +174,13 @@
newnewops = [] # XXX better name... (groggi)
moving_output_list = {}
+ known_pointers = []
for op in newops:
- self._record_constptrs(op, gcrefs_output_list, moving_output_list)
+ self._record_constptrs(op, gcrefs_output_list, moving_output_list,
+ known_pointers)
#
if len(moving_output_list) > 0:
- pinned_obj_tracker = PinnedObjectTracker(cpu, len(moving_output_list))
+ pinned_obj_tracker = PinnedObjectTracker(cpu, known_pointers)
if not we_are_translated():
self.last_pinned_object_tracker = pinned_obj_tracker
gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref)
From noreply at buildbot.pypy.org Sat Aug 16 18:47:29 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Sat, 16 Aug 2014 18:47:29 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: renaming, rewriting,
etc. for handling movable objects (i.e. ConstPtrs that have a
pointer which isn't really constant) inside the JIT
Message-ID: <20140816164729.03F9E1C0157@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72826:ed703b3e1311
Date: 2014-08-16 18:46 +0200
http://bitbucket.org/pypy/pypy/changeset/ed703b3e1311/
Log: renaming, rewriting, etc. for handling movable objects (i.e.
ConstPtrs that have a pointer which isn't really constant) inside
the JIT
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py
--- a/rpython/jit/backend/llsupport/gc.py
+++ b/rpython/jit/backend/llsupport/gc.py
@@ -20,32 +20,36 @@
from rpython.memory.gctransform import asmgcroot
from rpython.rtyper.lltypesystem import llmemory
-class PinnedObjectTracker(object):
- """Simple helper class to keep informations regarding the 'GcArray'
- in one place that is used to double load pinned objects.
- """
+class MovableObjectTracker(object):
- _ref_array_type = lltype.GcArray(llmemory.GCREF)
+ ptr_array_type = lltype.GcArray(llmemory.GCREF)
- def __init__(self, cpu, pointers):
- # prepare GC array to hold the pointers
- size = len(pointers)
- self._ref_array = lltype.malloc(PinnedObjectTracker._ref_array_type, size)
- self.ref_array_descr = cpu.arraydescrof(PinnedObjectTracker._ref_array_type)
- self.ref_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self._ref_array)
- self.const_ptr_gcref_array = ConstPtr(self.ref_array_gcref)
+ def __init__(self, cpu, const_pointers):
+ size = len(const_pointers)
+ # check that there is at least one moving object (i.e. changing pointers).
+ # Otherwise there is no reason for an instance of this class.
+ assert size > 0
#
- # assign each pointer an index and put the pointer into the GC array
+ # prepare GC array to hold the pointers that may change
+ self.ptr_array = lltype.malloc(MovableObjectTracker.ptr_array_type, size)
+ self.ptr_array_descr = cpu.arraydescrof(MovableObjectTracker.ptr_array_type)
+ self.ptr_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self.ptr_array)
+ # use always the same ConstPtr to access the array
+ # (easier to read JIT trace)
+ self.const_ptr_gcref_array = ConstPtr(self.ptr_array_gcref)
+ #
+ # assign each pointer an index and put the pointer into the GC array.
+ # since pointers and addresses are not good keys to use before translation,
+ # ConstPtrs are used as the key for the dict.
self._indexes = {}
- for index in range(len(pointers)):
- ptr = pointers[index]
- self._indexes[llmemory.cast_ptr_to_adr(ptr)] = llmemory.cast_int_to_adr(index)
- self._ref_array[index] = ptr
+ for index in range(size):
+ ptr = const_pointers[index]
+ self._indexes[ptr] = index
+ self.ptr_array[index] = ptr.value
- def add_ref(self, ptr):
- assert llmemory.cast_ptr_to_adr(ptr) in self._indexes
- index = llmemory.cast_adr_to_int(self._indexes[llmemory.cast_ptr_to_adr(ptr)])
- assert ptr == self._ref_array[index]
+ def get_array_index(self, const_ptr):
+ index = self._indexes[const_ptr]
+ assert const_ptr.value == self.ptr_array[index]
return index
# ____________________________________________________________
@@ -120,9 +124,9 @@
def gc_malloc_unicode(self, num_elem):
return self._bh_malloc_array(num_elem, self.unicode_descr)
- def _record_constptrs(self, op, gcrefs_output_list, moving_output_list,
- known_pointers):
- moving_output_list[op] = []
+ def _record_constptrs(self, op, gcrefs_output_list, ops_with_movable_const_ptr,
+ changeable_const_pointers):
+ ops_with_movable_const_ptr[op] = []
for i in range(op.numargs()):
v = op.getarg(i)
if isinstance(v, ConstPtr) and bool(v.value):
@@ -130,33 +134,31 @@
if rgc._make_sure_does_not_move(p):
gcrefs_output_list.append(p)
else:
- moving_output_list[op].append(i)
- if p not in known_pointers:
- known_pointers.append(p)
+ ops_with_movable_const_ptr[op].append(i)
+ if v not in changeable_const_pointers:
+ changeable_const_pointers.append(v)
#
if op.is_guard() or op.getopnum() == rop.FINISH:
llref = cast_instance_to_gcref(op.getdescr())
assert rgc._make_sure_does_not_move(llref)
gcrefs_output_list.append(llref)
#
- if len(moving_output_list[op]) == 0:
- del moving_output_list[op]
+ if len(ops_with_movable_const_ptr[op]) == 0:
+ del ops_with_movable_const_ptr[op]
- def _rewrite_constptrs(self, op, moving_output_list, pinned_obj_tracker):
+ def _rewrite_changeable_constptrs(self, op, ops_with_movable_const_ptr, moving_obj_tracker):
newops = []
- for arg_i in moving_output_list[op]:
+ for arg_i in ops_with_movable_const_ptr[op]:
v = op.getarg(arg_i)
# assert to make sure we got what we expected
assert isinstance(v, ConstPtr)
- assert bool(v.value)
- p = v.value
result_ptr = BoxPtr()
- array_index = pinned_obj_tracker.add_ref(p)
+ array_index = moving_obj_tracker.get_array_index(v)
load_op = ResOperation(rop.GETARRAYITEM_GC,
- [pinned_obj_tracker.const_ptr_gcref_array,
+ [moving_obj_tracker.const_ptr_gcref_array,
ConstInt(array_index)],
result_ptr,
- descr=pinned_obj_tracker.ref_array_descr)
+ descr=moving_obj_tracker.ptr_array_descr)
newops.append(load_op)
op.setarg(arg_i, result_ptr)
#
@@ -166,37 +168,48 @@
def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
rewriter = GcRewriterAssembler(self, cpu)
newops = rewriter.rewrite(operations)
- # record all GCREFs, because the GC (or Boehm) cannot see them and
- # keep them alive if they end up as constants in the assembler
-
- # XXX add comment (groggi)
-
- newnewops = [] # XXX better name... (groggi)
- moving_output_list = {}
- known_pointers = []
+ # the key is an operation that contains a ConstPtr as an argument and
+ # this ConstPtrs pointer might change as it points to an object that
+ # can't be made non-moving (e.g. the object is pinned).
+ ops_with_movable_const_ptr = {}
+ #
+ # a list of such not really constant ConstPtrs.
+ changeable_const_pointers = []
for op in newops:
- self._record_constptrs(op, gcrefs_output_list, moving_output_list,
- known_pointers)
+ # record all GCREFs, because the GC (or Boehm) cannot see them and
+ # keep them alive if they end up as constants in the assembler.
+ # If such a GCREF can change and we can't make the object it points
+ # to non-movable, we have to handle it separately. Such GCREFs are
+ # returned as ConstPtrs in 'changeable_const_pointers' and the
+ # affected operation is returned in 'ops_with_movable_const_ptr'.
+ # For this special case see '_rewrite_changeable_constptrs'.
+ self._record_constptrs(op, gcrefs_output_list,
+ ops_with_movable_const_ptr, changeable_const_pointers)
#
- if len(moving_output_list) > 0:
- pinned_obj_tracker = PinnedObjectTracker(cpu, known_pointers)
+ # handle pointers that are not guaranteed to stay the same
+ if len(ops_with_movable_const_ptr) > 0:
+ moving_obj_tracker = MovableObjectTracker(cpu, changeable_const_pointers)
+ #
if not we_are_translated():
- self.last_pinned_object_tracker = pinned_obj_tracker
- gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref)
- rgc._make_sure_does_not_move(pinned_obj_tracker.ref_array_gcref)
+ # used for testing
+ self.last_moving_obj_tracker = moving_obj_tracker
+ # make sure the array containing the pointers is not collected by
+ # the GC (or Boehm)
+ gcrefs_output_list.append(moving_obj_tracker.ptr_array_gcref)
+ rgc._make_sure_does_not_move(moving_obj_tracker.ptr_array_gcref)
- for op in newops:
- if op in moving_output_list:
- reops = self._rewrite_constptrs(op, moving_output_list,
- pinned_obj_tracker)
- newnewops.extend(reops)
+ ops = newops
+ newops = []
+ for op in ops:
+ if op in ops_with_movable_const_ptr:
+ rewritten_ops = self._rewrite_changeable_constptrs(op,
+ ops_with_movable_const_ptr, moving_obj_tracker)
+ newops.extend(rewritten_ops)
else:
- newnewops.append(op)
- #
- return newnewops
- else:
- return newops
+ newops.append(op)
+ #
+ return newops
@specialize.memo()
def getframedescrs(self, cpu):
diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py
@@ -3,7 +3,7 @@
get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\
SizeDescrWithVTable, get_interiorfield_descr
from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\
- GcLLDescr_framework, PinnedObjectTracker
+ GcLLDescr_framework, MovableObjectTracker
from rpython.jit.backend.llsupport import jitframe, gc
from rpython.jit.metainterp.gc import get_description
from rpython.jit.tool.oparser import parse
@@ -45,7 +45,7 @@
notpinned_obj_ptr = lltype.malloc(notpinned_obj_type)
notpinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, notpinned_obj_ptr)
#
- ref_array_descr = self.cpu.arraydescrof(PinnedObjectTracker._ref_array_type)
+ ptr_array_descr = self.cpu.arraydescrof(MovableObjectTracker.ptr_array_type)
#
vtable_descr = self.gc_ll_descr.fielddescr_vtable
O = lltype.GcStruct('O', ('parent', rclass.OBJECT),
@@ -92,9 +92,9 @@
[])
# make the array containing the GCREF's accessible inside the tests.
# This must be done after we call 'rewrite_assembler'. Before that
- # call 'last_pinned_object_tracker' is None or filled with some old
+ # call 'last_moving_obj_tracker' is None or filled with some old
# value.
- namespace['ref_array_gcref'] = self.gc_ll_descr.last_pinned_object_tracker.ref_array_gcref
+ namespace['ptr_array_gcref'] = self.gc_ll_descr.last_moving_obj_tracker.ptr_array_gcref
expected = parse(to_operations % Evaluator(namespace),
namespace=namespace)
equaloplists(operations, expected.operations)
@@ -127,7 +127,7 @@
i0 = getfield_gc(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr)
""", """
[]
- p1 = getarrayitem_gc(ConstPtr(ref_array_gcref), 0, descr=ref_array_descr)
+ p1 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr)
i0 = getfield_gc(p1, descr=pinned_obj_my_int_descr)
""")
@@ -139,9 +139,9 @@
i2 = getfield_gc(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr)
""", """
[]
- p1 = getarrayitem_gc(ConstPtr(ref_array_gcref), 0, descr=ref_array_descr)
+ p1 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr)
i0 = getfield_gc(p1, descr=pinned_obj_my_int_descr)
i1 = getfield_gc(ConstPtr(notpinned_obj_gcref), descr=notpinned_obj_my_int_descr)
- p2 = getarrayitem_gc(ConstPtr(ref_array_gcref), 1, descr=ref_array_descr)
+ p2 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 1, descr=ptr_array_descr)
i2 = getfield_gc(p2, descr=pinned_obj_my_int_descr)
""")
From noreply at buildbot.pypy.org Sat Aug 16 20:47:40 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 20:47:40 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: Close branch py3.3-fixes2
Message-ID: <20140816184740.1B6BB1C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3-fixes2
Changeset: r72827:fa18c2a58936
Date: 2014-08-16 11:47 -0700
http://bitbucket.org/pypy/pypy/changeset/fa18c2a58936/
Log: Close branch py3.3-fixes2
From noreply at buildbot.pypy.org Sat Aug 16 20:47:49 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 20:47:49 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2
(pull request #268)
Message-ID: <20140816184749.9D7581C0157@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72828:c046f2fe5239
Date: 2014-08-16 11:47 -0700
http://bitbucket.org/pypy/pypy/changeset/c046f2fe5239/
Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #268)
[py3.3] bz2: disallow pickling for compressor/decompressor (cpython
compat)
From noreply at buildbot.pypy.org Sat Aug 16 22:49:13 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Sat, 16 Aug 2014 22:49:13 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: test_ufunc leaks references but passes
test, how to create an array of function pointers?
Message-ID: <20140816204913.CBB611C0157@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72829:422aa6836286
Date: 2014-08-16 23:41 +0300
http://bitbucket.org/pypy/pypy/changeset/422aa6836286/
Log: test_ufunc leaks references but passes test, how to create an array
of function pointers?
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -277,11 +277,10 @@
arg_i = args_w[i]
assert isinstance(arg_i, W_NDimArray)
raw_storage_setitem(dataps, CCHARP_SIZE * i, rffi.cast(rffi.CCHARP, arg_i.implementation.storage))
- #This assumes we iterate over the last dimension?
- raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_shape()[0]))
- raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.implementation.strides[0]))
+ #This assumes we iterate over the whole array (it should be a view...)
+ raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_size()))
+ raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_dtype().elsize))
try:
- import pdb;pdb.set_trace()
self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps),
rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data)
except:
@@ -299,7 +298,7 @@
GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp,
rffi.VOIDP], lltype.Void)
gufunctype = lltype.Ptr(GenericUfunc)
- at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
+ at cpython_api([gufunctype, rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t,
rffi.CCHARP], PyObject)
def _PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes,
@@ -307,7 +306,7 @@
funcs_w = [None] * ntypes
dtypes_w = [None] * ntypes * (nin + nout)
for i in range(ntypes):
- funcs_w[i] = W_GenericUFuncCaller(funcs[i])
+ funcs_w[i] = W_GenericUFuncCaller(funcs)
for i in range(ntypes*(nin+nout)):
dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])]
w_funcs = space.newlist(funcs_w)
diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py
--- a/pypy/module/cpyext/test/test_ndarrayobject.py
+++ b/pypy/module/cpyext/test/test_ndarrayobject.py
@@ -314,14 +314,16 @@
raises(TypeError, "mod.check_array(42)")
def test_ufunc(self):
- from _numpypy.multiarray import ndarray
+ from _numpypy.multiarray import arange
mod = self.import_extension('foo', [
("create_ufunc", "METH_NOARGS",
"""
PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2};
char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT };
void *array_data[] = {NULL, NULL};
- PyObject * retval = _PyUFunc_FromFuncAndDataAndSignature(funcs,
+ PyObject * retval;
+ /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of function pointers? */
+ retval = _PyUFunc_FromFuncAndDataAndSignature(funcs[1],
array_data, types, 2, 1, 1, PyUFunc_None,
"times2", "times2_docstring", 0, "()->()");
Py_INCREF(retval);
@@ -361,7 +363,6 @@
char *in = args[0], *out=args[1];
npy_intp in_step = steps[0], out_step = steps[1];
int tmp;
-
for (i = 0; i < n; i++) {
/*BEGIN main ufunc computation*/
tmp = *(int *)in;
@@ -374,6 +375,6 @@
};
}; ''')
times2 = mod.create_ufunc()
- arr = ndarray((3, 4), dtype='i')
+ arr = arange(12, dtype='i').reshape(3, 4)
out = times2(arr)
- assert (out == [6, 8]).all()
+ assert (out == arr * 2).all()
From noreply at buildbot.pypy.org Sat Aug 16 22:49:15 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Sat, 16 Aug 2014 22:49:15 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: document where api is still not
compatible (help will be appreciated)
Message-ID: <20140816204915.2870D1C0157@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72830:8e348173655b
Date: 2014-08-16 23:48 +0300
http://bitbucket.org/pypy/pypy/changeset/8e348173655b/
Log: document where api is still not compatible (help will be
appreciated)
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -283,9 +283,6 @@
try:
self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps),
rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data)
- except:
- import traceback; traceback.print_exc()
- raise
finally:
free_raw_storage(dataps, track_allocation=False)
free_raw_storage(dims, track_allocation=False)
@@ -298,6 +295,8 @@
GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp,
rffi.VOIDP], lltype.Void)
gufunctype = lltype.Ptr(GenericUfunc)
+# XXX the signature is wrong, it should be an array of gufunctype, but
+# XXX rffi.CArrayPtr(gufunctype) does not seem to work ???
@cpython_api([gufunctype, rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t,
rffi.CCHARP], PyObject)
@@ -306,6 +305,7 @@
funcs_w = [None] * ntypes
dtypes_w = [None] * ntypes * (nin + nout)
for i in range(ntypes):
+ # XXX this should be 'funcs[i]' not 'funcs'
funcs_w[i] = W_GenericUFuncCaller(funcs)
for i in range(ntypes*(nin+nout)):
dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])]
diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py
--- a/pypy/module/cpyext/test/test_ndarrayobject.py
+++ b/pypy/module/cpyext/test/test_ndarrayobject.py
@@ -322,7 +322,8 @@
char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT };
void *array_data[] = {NULL, NULL};
PyObject * retval;
- /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of function pointers? */
+ /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of
+ function pointers in ndarrayobject.py? */
retval = _PyUFunc_FromFuncAndDataAndSignature(funcs[1],
array_data, types, 2, 1, 1, PyUFunc_None,
"times2", "times2_docstring", 0, "()->()");
From noreply at buildbot.pypy.org Sat Aug 16 23:08:42 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Sat, 16 Aug 2014 23:08:42 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: bz2: disallow pickling for
compressor/decompressor (cpython compat)
Message-ID: <20140816210842.613731C02ED@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes2
Changeset: r72831:a11bfe6cc0b1
Date: 2014-08-15 23:50 +0200
http://bitbucket.org/pypy/pypy/changeset/a11bfe6cc0b1/
Log: bz2: disallow pickling for compressor/decompressor (cpython compat)
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -268,6 +268,10 @@
BZ2_bzCompressEnd(self.bzs)
lltype.free(self.bzs, flavor='raw')
+ def __getstate__(self):
+ raise OperationError(self.space.w_TypeError,
+ self.space.wrap("cannot serialize '_bz2.BZ2Compressor' object"))
+
@unwrap_spec(data='bufferstr')
def compress(self, data):
"""compress(data) -> string
@@ -333,6 +337,7 @@
W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor",
__doc__ = W_BZ2Compressor.__doc__,
__new__ = interp2app(descr_compressor__new__),
+ __getstate__ = interp2app(W_BZ2Compressor.__getstate__),
compress = interp2app(W_BZ2Compressor.compress),
flush = interp2app(W_BZ2Compressor.flush),
)
@@ -372,6 +377,10 @@
BZ2_bzDecompressEnd(self.bzs)
lltype.free(self.bzs, flavor='raw')
+ def __getstate__(self):
+ raise OperationError(self.space.w_TypeError,
+ self.space.wrap("cannot serialize '_bz2.BZ2Decompressor' object"))
+
def eof_w(self, space):
if self.running:
return space.w_False
@@ -429,6 +438,7 @@
W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor",
__doc__ = W_BZ2Decompressor.__doc__,
__new__ = interp2app(descr_decompressor__new__),
+ __getstate__ = interp2app(W_BZ2Decompressor.__getstate__),
unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor),
eof = GetSetProperty(W_BZ2Decompressor.eof_w),
decompress = interp2app(W_BZ2Decompressor.decompress),
diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py
--- a/pypy/module/bz2/test/test_bz2_compdecomp.py
+++ b/pypy/module/bz2/test/test_bz2_compdecomp.py
@@ -108,6 +108,13 @@
data += bz2c.flush()
assert self.decompress(data) == self.TEXT
+ def test_compressor_pickle_error(self):
+ from bz2 import BZ2Compressor
+ import pickle
+
+ exc = raises(TypeError, pickle.dumps, BZ2Compressor())
+ assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Compressor' object"
+
class AppTestBZ2Decompressor(CheckAllocation):
spaceconfig = dict(usemodules=('bz2', 'rctime'))
@@ -186,6 +193,13 @@
assert decompressed_data == b''
raises(IOError, bz2d.decompress, self.BUGGY_DATA)
+ def test_decompressor_pickle_error(self):
+ from bz2 import BZ2Decompressor
+ import pickle
+
+ exc = raises(TypeError, pickle.dumps, BZ2Decompressor())
+ assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Decompressor' object"
+
class AppTestBZ2ModuleFunctions(CheckAllocation):
spaceconfig = dict(usemodules=('bz2', 'rctime'))
From noreply at buildbot.pypy.org Sat Aug 16 23:08:43 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Sat, 16 Aug 2014 23:08:43 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: prefer oefmt over OperationError
Message-ID: <20140816210843.A4C771C02ED@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes2
Changeset: r72832:50ea833f79dd
Date: 2014-08-16 06:57 +0200
http://bitbucket.org/pypy/pypy/changeset/50ea833f79dd/
Log: prefer oefmt over OperationError
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -2,7 +2,7 @@
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.lltypesystem import lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes
from pypy.interpreter.typedef import GetSetProperty
@@ -269,8 +269,7 @@
lltype.free(self.bzs, flavor='raw')
def __getstate__(self):
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("cannot serialize '_bz2.BZ2Compressor' object"))
+ raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self)
@unwrap_spec(data='bufferstr')
def compress(self, data):
@@ -378,8 +377,7 @@
lltype.free(self.bzs, flavor='raw')
def __getstate__(self):
- raise OperationError(self.space.w_TypeError,
- self.space.wrap("cannot serialize '_bz2.BZ2Decompressor' object"))
+ raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self)
def eof_w(self, space):
if self.running:
From noreply at buildbot.pypy.org Sat Aug 16 23:08:44 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 23:08:44 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: close branch
Message-ID: <20140816210844.C3B631C02ED@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3-fixes2
Changeset: r72833:e4d0e6a3c726
Date: 2014-08-16 14:05 -0700
http://bitbucket.org/pypy/pypy/changeset/e4d0e6a3c726/
Log: close branch
From noreply at buildbot.pypy.org Sat Aug 16 23:08:45 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sat, 16 Aug 2014 23:08:45 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: merge py3.3-fixes2
Message-ID: <20140816210845.F22761C02ED@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72834:ca985b528468
Date: 2014-08-16 14:06 -0700
http://bitbucket.org/pypy/pypy/changeset/ca985b528468/
Log: merge py3.3-fixes2
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -2,7 +2,7 @@
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.lltypesystem import lltype
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes
from pypy.interpreter.typedef import GetSetProperty
@@ -268,6 +268,9 @@
BZ2_bzCompressEnd(self.bzs)
lltype.free(self.bzs, flavor='raw')
+ def __getstate__(self):
+ raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self)
+
@unwrap_spec(data='bufferstr')
def compress(self, data):
"""compress(data) -> string
@@ -333,6 +336,7 @@
W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor",
__doc__ = W_BZ2Compressor.__doc__,
__new__ = interp2app(descr_compressor__new__),
+ __getstate__ = interp2app(W_BZ2Compressor.__getstate__),
compress = interp2app(W_BZ2Compressor.compress),
flush = interp2app(W_BZ2Compressor.flush),
)
@@ -372,6 +376,9 @@
BZ2_bzDecompressEnd(self.bzs)
lltype.free(self.bzs, flavor='raw')
+ def __getstate__(self):
+ raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self)
+
def eof_w(self, space):
if self.running:
return space.w_False
@@ -429,6 +436,7 @@
W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor",
__doc__ = W_BZ2Decompressor.__doc__,
__new__ = interp2app(descr_decompressor__new__),
+ __getstate__ = interp2app(W_BZ2Decompressor.__getstate__),
unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor),
eof = GetSetProperty(W_BZ2Decompressor.eof_w),
decompress = interp2app(W_BZ2Decompressor.decompress),
diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py
--- a/pypy/module/bz2/test/test_bz2_compdecomp.py
+++ b/pypy/module/bz2/test/test_bz2_compdecomp.py
@@ -108,6 +108,13 @@
data += bz2c.flush()
assert self.decompress(data) == self.TEXT
+ def test_compressor_pickle_error(self):
+ from bz2 import BZ2Compressor
+ import pickle
+
+ exc = raises(TypeError, pickle.dumps, BZ2Compressor())
+ assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Compressor' object"
+
class AppTestBZ2Decompressor(CheckAllocation):
spaceconfig = dict(usemodules=('bz2', 'rctime'))
@@ -186,6 +193,13 @@
assert decompressed_data == b''
raises(IOError, bz2d.decompress, self.BUGGY_DATA)
+ def test_decompressor_pickle_error(self):
+ from bz2 import BZ2Decompressor
+ import pickle
+
+ exc = raises(TypeError, pickle.dumps, BZ2Decompressor())
+ assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Decompressor' object"
+
class AppTestBZ2ModuleFunctions(CheckAllocation):
spaceconfig = dict(usemodules=('bz2', 'rctime'))
From noreply at buildbot.pypy.org Sun Aug 17 03:07:12 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sun, 17 Aug 2014 03:07:12 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: pep8 whitespace
Message-ID: <20140817010712.2E5E51C3436@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72835:62b5d70f4e91
Date: 2014-08-16 17:30 -0700
http://bitbucket.org/pypy/pypy/changeset/62b5d70f4e91/
Log: pep8 whitespace
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -61,7 +61,7 @@
else:
varargname = None
if code.co_flags & CO_VARKEYWORDS:
- kwargname = code.co_varnames[argcount+kwonlyargcount]
+ kwargname = code.co_varnames[argcount + kwonlyargcount]
argcount += 1
else:
kwargname = None
From noreply at buildbot.pypy.org Sun Aug 17 03:07:13 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sun, 17 Aug 2014 03:07:13 +0200 (CEST)
Subject: [pypy-commit] pypy py3k: fix handling of None values in kw_defaults,
which are valid. found by py3.3's
Message-ID: <20140817010713.74E191C3436@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3k
Changeset: r72836:7da16b3cbdc2
Date: 2014-08-16 18:04 -0700
http://bitbucket.org/pypy/pypy/changeset/7da16b3cbdc2/
Log: fix handling of None values in kw_defaults, which are valid. found
by py3.3's tests
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -2416,7 +2416,8 @@
self.kw_defaults = None
if self.kw_defaults is not None:
for node in self.kw_defaults:
- node.sync_app_attrs(space)
+ if node:
+ node.sync_app_attrs(space)
class arg(AST):
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1034,6 +1034,13 @@
exec('# -*- coding: utf-8 -*-\n\nu = "\xf0\x9f\x92\x8b"', d)
assert len(d['u']) == 4
+ def test_kw_defaults_None(self):
+ import _ast
+ source = "def foo(self, *args, name): pass"
+ ast = compile(source, '', 'exec', _ast.PyCF_ONLY_AST)
+ # compiling the produced AST previously triggered a crash
+ compile(ast, '', 'exec')
+
class TestOptimizations:
def count_instructions(self, source):
From noreply at buildbot.pypy.org Sun Aug 17 03:07:14 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sun, 17 Aug 2014 03:07:14 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: merge py3k
Message-ID: <20140817010714.D94BB1C3436@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72837:3d71999bc05e
Date: 2014-08-16 18:04 -0700
http://bitbucket.org/pypy/pypy/changeset/3d71999bc05e/
Log: merge py3k
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -2443,7 +2443,8 @@
self.kw_defaults = None
if self.kw_defaults is not None:
for node in self.kw_defaults:
- node.sync_app_attrs(space)
+ if node:
+ node.sync_app_attrs(space)
class arg(AST):
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1052,6 +1052,13 @@
exec('# -*- coding: utf-8 -*-\n\nu = "\xf0\x9f\x92\x8b"', d)
assert len(d['u']) == 4
+ def test_kw_defaults_None(self):
+ import _ast
+ source = "def foo(self, *args, name): pass"
+ ast = compile(source, '', 'exec', _ast.PyCF_ONLY_AST)
+ # compiling the produced AST previously triggered a crash
+ compile(ast, '', 'exec')
+
class TestOptimizations:
def count_instructions(self, source):
From noreply at buildbot.pypy.org Sun Aug 17 03:07:16 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sun, 17 Aug 2014 03:07:16 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: fix handling of BoolOp.values when it's
None
Message-ID: <20140817010716.29FDD1C3436@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72838:f0974e24fc50
Date: 2014-08-16 18:04 -0700
http://bitbucket.org/pypy/pypy/changeset/f0974e24fc50/
Log: fix handling of BoolOp.values when it's None
diff --git a/pypy/interpreter/astcompiler/test/test_validate.py b/pypy/interpreter/astcompiler/test/test_validate.py
--- a/pypy/interpreter/astcompiler/test/test_validate.py
+++ b/pypy/interpreter/astcompiler/test/test_validate.py
@@ -237,6 +237,8 @@
def test_boolop(self):
b = ast.BoolOp(ast.And, [], 0, 0)
self.expr(b, "less than 2 values")
+ b = ast.BoolOp(ast.And, None, 0, 0)
+ self.expr(b, "less than 2 values")
b = ast.BoolOp(ast.And, [ast.Num(self.space.wrap(3), 0, 0)], 0, 0)
self.expr(b, "less than 2 values")
b = ast.BoolOp(ast.And, [ast.Num(self.space.wrap(4), 0, 0), None], 0, 0)
diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py
--- a/pypy/interpreter/astcompiler/validate.py
+++ b/pypy/interpreter/astcompiler/validate.py
@@ -295,7 +295,7 @@
pass
def visit_BoolOp(self, node):
- if len(node.values) < 2:
+ if self._len(node.values) < 2:
raise ValidationError("BoolOp with less than 2 values")
self._validate_exprs(node.values)
From noreply at buildbot.pypy.org Sun Aug 17 10:26:14 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 10:26:14 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Kill the stm's jitdriver
transformation
Message-ID: <20140817082614.EFBD61C332E@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72839:987cc6fc0366
Date: 2014-08-16 17:05 +0200
http://bitbucket.org/pypy/pypy/changeset/987cc6fc0366/
Log: Kill the stm's jitdriver transformation
diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py
deleted file mode 100644
--- a/rpython/translator/stm/jitdriver.py
+++ /dev/null
@@ -1,269 +0,0 @@
-from rpython.rtyper.lltypesystem import lltype, rclass
-from rpython.flowspace.model import checkgraph, copygraph
-from rpython.flowspace.model import Block, Link, SpaceOperation, Constant
-from rpython.translator.unsimplify import split_block, varoftype
-from rpython.annotator.model import s_Int
-from rpython.rtyper.llannotation import lltype_to_annotation
-from rpython.rtyper.annlowlevel import (MixLevelHelperAnnotator,
- cast_base_ptr_to_instance)
-from rpython.rlib import rstm
-from rpython.tool.sourcetools import compile2
-from rpython.translator.c.support import log
-
-def find_jit_merge_point(graph, relaxed=False):
- found = []
- for block in graph.iterblocks():
- for i in range(len(block.operations)):
- op = block.operations[i]
- if (op.opname == 'jit_marker'
- and op.args[0].value == 'jit_merge_point'):
- jitdriver = op.args[1].value
- if not jitdriver.autoreds:
- if jitdriver.stm_do_transaction_breaks:
- found.append((block, i))
- else:
- log.WARNING("ignoring non-stm jitdriver in %r" % (
- graph,))
- else:
- log.WARNING("ignoring jitdriver with autoreds in %r" % (
- graph,)) # XXX XXX!
-
- assert len(found) <= 1, "several jit_merge_point's in %r" % (graph,)
- if found:
- return found[0]
- else:
- return None
-
-def reorganize_around_jit_driver(stmtransformer, graph):
- location = find_jit_merge_point(graph)
- if location is not None:
- JitDriverSplitter(stmtransformer, graph).split(location)
-
-# ____________________________________________________________
-
-
-class JitDriverSplitter(object):
- #
- # def graph(..): | def graph(..):
- # stuff_before | stuff_before
- # while 1: ====> while 1:
- # jit_merge_point() | if should_break_transaction():
- # stuff_after | return invoke_stm(..)
- # | stuff_after
- # ----------------------------+
- #
- # def invoke_stm(..):
- # p = new container object
- # store (green args, red args) into p
- # perform_transaction(callback, p)
- # if p.got_exception: raise p.got_exception
- # return p.result_value
- #
- # (note that perform_transaction() itself will fill p.got_exception)
- #
- # def callback(p, retry_counter):
- # fish (green args, red args) from p
- # while 1:
- # stuff_after
- # if should_break_transaction():
- # store (green args, red args) into p
- # return 1 # causes perform_tr() to loop and call us again
- # p.result_value = result_value
- # return 0 # stop perform_tr() and returns
-
- def __init__(self, stmtransformer, graph):
- self.stmtransformer = stmtransformer
- self.main_graph = graph
- self.RESTYPE = graph.getreturnvar().concretetype
-
- def split(self, portal_location):
- self.check_jitdriver(portal_location)
- self.split_after_jit_merge_point(portal_location)
- self.make_container_type()
- #
- rtyper = self.stmtransformer.translator.rtyper
- self.mixlevelannotator = MixLevelHelperAnnotator(rtyper)
- self.make_callback_function()
- self.make_invoke_stm_function()
- self.rewrite_main_graph()
- self.mixlevelannotator.finish()
-
- def check_jitdriver(self, (portalblock, portalopindex)):
- op_jitmarker = portalblock.operations[portalopindex]
- assert op_jitmarker.opname == 'jit_marker'
- assert op_jitmarker.args[0].value == 'jit_merge_point'
- jitdriver = op_jitmarker.args[1].value
- assert not jitdriver.autoreds # fix me
-
- def split_after_jit_merge_point(self, (portalblock, portalopindex)):
- link = split_block(None, portalblock, portalopindex + 1)
- self.TYPES = [v.concretetype for v in link.args]
-
- def make_container_type(self):
- args = [('a%d' % i, self.TYPES[i]) for i in range(len(self.TYPES))]
- self.CONTAINER = lltype.GcStruct('StmArgs',
- ('result_value', self.RESTYPE),
- ('got_exception', rclass.OBJECTPTR),
- *args)
- self.CONTAINERP = lltype.Ptr(self.CONTAINER)
-
- def add_call_should_break_transaction(self, block):
- # add a should_break_transaction() call at the end of the block,
- # turn the following link into an "if False" link, add a new
- # "if True" link going to a fresh new block, and return this new
- # block.
- v2 = varoftype(lltype.Bool)
- block.operations.append(
- SpaceOperation('stm_should_break_transaction', [], v2))
- #
- assert block.exitswitch is None
- [link] = block.exits
- block.exitswitch = v2
- link.exitcase = False
- link.llexitcase = False
- newblock = Block([varoftype(v.concretetype) for v in link.args])
- otherlink = Link(link.args[:], newblock)
- otherlink.exitcase = True
- otherlink.llexitcase = True
- block.recloseblock(link, otherlink)
- return newblock
-
- def rewrite_main_graph(self):
- # add 'should_break_transaction()'
- main_graph = self.main_graph
- block1, i = find_jit_merge_point(main_graph, relaxed=True)
- assert i == len(block1.operations) - 1
- del block1.operations[i]
- blockf = self.add_call_should_break_transaction(block1)
- #
- # fill in blockf with a call to invoke_stm()
- v = varoftype(self.RESTYPE, 'result')
- op = SpaceOperation('direct_call',
- [self.c_invoke_stm_func] + blockf.inputargs, v)
- blockf.operations.append(op)
- blockf.closeblock(Link([v], main_graph.returnblock))
- #
- checkgraph(main_graph)
-
- def make_invoke_stm_function(self):
- CONTAINER = self.CONTAINER
- callback = self.callback_function
- perform_transaction = rstm.make_perform_transaction(callback,
- self.CONTAINERP)
- irange = range(len(self.TYPES))
- source = """if 1:
- def ll_invoke_stm(%s):
- p = lltype.malloc(CONTAINER)
- %s
- perform_transaction(p)
- if p.got_exception:
- raise cast_base_ptr_to_instance(Exception, p.got_exception)
- return p.result_value
-""" % (', '.join(['a%d' % i for i in irange]),
- '; '.join(['p.a%d = a%d' % (i, i) for i in irange]))
- d = {'CONTAINER': CONTAINER,
- 'lltype': lltype,
- 'perform_transaction': perform_transaction,
- 'cast_base_ptr_to_instance': cast_base_ptr_to_instance,
- }
- exec compile2(source) in d
- ll_invoke_stm = d['ll_invoke_stm']
- #
- mix = self.mixlevelannotator
- c_func = mix.constfunc(ll_invoke_stm,
- map(lltype_to_annotation, self.TYPES),
- lltype_to_annotation(self.RESTYPE))
- self.c_invoke_stm_func = c_func
-
- def container_var(self):
- return varoftype(self.CONTAINERP, 'stmargs')
-
- def make_callback_function(self):
- # make a copy of the 'main_graph'
- callback_graph = copygraph(self.main_graph)
- callback_graph.name += '_stm'
- self.callback_graph = callback_graph
- self.stmtransformer.translator.graphs.append(callback_graph)
- #for v1, v2 in zip(
- # self.main_graph.getargs() + [self.main_graph.getreturnvar()],
- # callback_graph.getargs() + [callback_graph.getreturnvar()]):
- # self.stmtransformer.translator.annotator.transfer_binding(v2, v1)
- #
- # make a new startblock
- v_p = self.container_var()
- v_retry_counter = varoftype(lltype.Signed, 'retry_counter')
- blockst = Block([v_retry_counter]) # 'v_p' inserted below
- renamed_p = {blockst: v_p}
- annotator = self.stmtransformer.translator.annotator
- annotator.setbinding(v_p, lltype_to_annotation(self.CONTAINERP))
- annotator.setbinding(v_retry_counter, s_Int)
- #
- # change the startblock of callback_graph to point just after the
- # jit_merge_point
- block1, i = find_jit_merge_point(callback_graph, relaxed=True)
- assert i == len(block1.operations) - 1
- del block1.operations[i]
- [link] = block1.exits
- callback_graph.startblock = blockst
- #
- # fill in the operations of blockst: getfields reading all live vars
- a_vars = []
- for i in range(len(self.TYPES)):
- c_a_i = Constant('a%d' % i, lltype.Void)
- v_a_i = varoftype(self.TYPES[i])
- blockst.operations.append(
- SpaceOperation('getfield', [v_p, c_a_i], v_a_i))
- a_vars.append(v_a_i)
- blockst.closeblock(Link(a_vars, link.target))
- #
- # hack at the regular return block, to set the result into
- # 'p.result_value' and return 0. Note that 'p.got_exception'
- # is already cleared.
- blockr = callback_graph.returnblock
- c_result_value = Constant('result_value', lltype.Void)
- v_p = self.container_var()
- renamed_p[blockr] = v_p
- blockr.operations = [
- SpaceOperation('setfield',
- [v_p, c_result_value, blockr.inputargs[0]],
- varoftype(lltype.Void)),
- ]
- v = varoftype(lltype.Signed)
- annotator.setbinding(v, s_Int)
- newblockr = Block([v])
- newblockr.operations = ()
- newblockr.closeblock()
- blockr.recloseblock(Link([Constant(0, lltype.Signed)], newblockr))
- callback_graph.returnblock = newblockr
- #
- # add 'should_break_transaction()' at the end of the loop
- blockf = self.add_call_should_break_transaction(block1)
- # store the variables again into v_p
- v_p = self.container_var()
- renamed_p[blockf] = v_p
- for i in range(len(self.TYPES)):
- c_a_i = Constant('a%d' % i, lltype.Void)
- v_a_i = blockf.inputargs[i]
- assert v_a_i.concretetype == self.TYPES[i]
- blockf.operations.append(
- SpaceOperation('setfield', [v_p, c_a_i, v_a_i],
- varoftype(lltype.Void)))
- blockf.closeblock(Link([Constant(1, lltype.Signed)], newblockr))
- #
- # now pass the original 'v_p' everywhere
- for block in callback_graph.iterblocks():
- if block.operations == (): # skip return and except blocks
- continue
- v_p = renamed_p.get(block, self.container_var())
- block.inputargs = [v_p] + block.inputargs
- for link in block.exits:
- if link.target.operations != (): # to return or except block
- link.args = [v_p] + link.args
- #
- checkgraph(callback_graph)
- #
- FUNCTYPE = lltype.FuncType([self.CONTAINERP, lltype.Signed],
- lltype.Signed)
- mix = self.mixlevelannotator
- self.callback_function = mix.graph2delayed(callback_graph,
- FUNCTYPE=FUNCTYPE)
diff --git a/rpython/translator/stm/test/test_jitdriver.py b/rpython/translator/stm/test/test_jitdriver.py
deleted file mode 100644
--- a/rpython/translator/stm/test/test_jitdriver.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.translator.stm.test.transform_support import BaseTestTransform
-from rpython.rlib.jit import JitDriver
-from rpython.rlib import rstm
-
-
-class TestJitDriver(BaseTestTransform):
- do_jit_driver = True
-
- def test_loop_no_arg(self):
- class X:
- counter = 10
- x = X()
- myjitdriver = JitDriver(greens=[], reds=[],
- stm_do_transaction_breaks=True)
-
- def f1():
- while x.counter > 0:
- myjitdriver.jit_merge_point()
- if rstm.jit_stm_should_break_transaction(False):
- rstm.jit_stm_transaction_break_point()
- x.counter -= 1
- return 'X'
-
- res = self.interpret(f1, [])
- assert res == 'X'
-
- def test_loop_args(self):
- class X:
- counter = 100
- x = X()
- myjitdriver = JitDriver(greens=['a'], reds=['b', 'c'])
-
- def f1(a, b, c):
- while x.counter > 0:
- myjitdriver.jit_merge_point(a=a, b=b, c=c)
- x.counter -= (ord(a) + rffi.cast(lltype.Signed, b) + c)
- return 'X'
-
- res = self.interpret(f1, ['\x03', rffi.cast(rffi.SHORT, 4), 2])
- assert res == 'X'
-
- def test_loop_void_result(self):
- class X:
- counter = 10
- x = X()
- myjitdriver = JitDriver(greens=[], reds=[],
- stm_do_transaction_breaks=True)
-
- def f1():
- while x.counter > 0:
- myjitdriver.jit_merge_point()
- if rstm.jit_stm_should_break_transaction(False):
- rstm.jit_stm_transaction_break_point()
-
- x.counter -= 1
-
- res = self.interpret(f1, [])
- assert res == None
diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py
--- a/rpython/translator/stm/transform.py
+++ b/rpython/translator/stm/transform.py
@@ -1,6 +1,5 @@
from rpython.translator.stm.inevitable import insert_turn_inevitable
from rpython.translator.stm.readbarrier import insert_stm_read_barrier
-from rpython.translator.stm.jitdriver import reorganize_around_jit_driver
from rpython.translator.c.support import log
@@ -12,7 +11,6 @@
def transform(self):
assert not hasattr(self.translator, 'stm_transformation_applied')
self.start_log(1)
- self.transform_jit_driver()
self.transform_turn_inevitable()
self.print_logs(1)
self.translator.stm_transformation_applied = True
@@ -35,10 +33,6 @@
for graph in self.translator.graphs:
insert_turn_inevitable(graph)
- def transform_jit_driver(self):
- for graph in self.translator.graphs:
- reorganize_around_jit_driver(self, graph)
-
def start_log(self, step):
log.info("Software Transactional Memory transformation, step %d"
% step)
From noreply at buildbot.pypy.org Sun Aug 17 10:26:16 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 10:26:16 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Bah
Message-ID: <20140817082616.306A91C332E@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72840:7f570c76ae70
Date: 2014-08-17 10:25 +0200
http://bitbucket.org/pypy/pypy/changeset/7f570c76ae70/
Log: Bah
diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py
--- a/rpython/memory/gctransform/stmframework.py
+++ b/rpython/memory/gctransform/stmframework.py
@@ -157,9 +157,8 @@
# sync with lloperation.py
gct_stm_become_inevitable = _gct_with_roots_pushed
-
gct_stm_become_globally_unique_transaction = _gct_with_roots_pushed
- gct_stm_perform_transaction = _gct_with_roots_pushed
+ gct_stm_transaction_break = _gct_with_roots_pushed
class StmRootWalker(BaseRootWalker):
From noreply at buildbot.pypy.org Sun Aug 17 16:01:36 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 16:01:36 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Remove some other inevitable
transactions
Message-ID: <20140817140136.6AD0B1C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72841:9d2407315c43
Date: 2014-08-17 16:01 +0200
http://bitbucket.org/pypy/pypy/changeset/9d2407315c43/
Log: Remove some other inevitable transactions
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -125,21 +125,21 @@
def after_external_call():
if we_are_translated():
# starts a new transaction if we are not atomic already
- llop.stm_start_inevitable_if_not_atomic(lltype.Void)
+ llop.stm_start_if_not_atomic(lltype.Void)
after_external_call._dont_reach_me_in_del_ = True
after_external_call._transaction_break_ = True
@dont_look_inside
-def enter_callback_call():
+def enter_callback_call(rjbuf):
if we_are_translated():
- return llop.stm_enter_callback_call(lltype.Signed)
+ return llop.stm_enter_callback_call(lltype.Signed, rjbuf)
enter_callback_call._dont_reach_me_in_del_ = True
enter_callback_call._transaction_break_ = True
@dont_look_inside
-def leave_callback_call(token):
+def leave_callback_call(rjbuf, token):
if we_are_translated():
- llop.stm_leave_callback_call(lltype.Void, token)
+ llop.stm_leave_callback_call(lltype.Void, rjbuf, token)
leave_callback_call._dont_reach_me_in_del_ = True
leave_callback_call._transaction_break_ = True
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -430,7 +430,7 @@
'stm_push_root': LLOp(),
'stm_pop_root_into': LLOp(),
'stm_commit_if_not_atomic': LLOp(canmallocgc=True),
- 'stm_start_inevitable_if_not_atomic': LLOp(canmallocgc=True),
+ 'stm_start_if_not_atomic': LLOp(canmallocgc=True),
'stm_abort_and_retry': LLOp(canmallocgc=True),
'stm_enter_callback_call': LLOp(canmallocgc=True),
'stm_leave_callback_call': LLOp(),
diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py
--- a/rpython/rtyper/lltypesystem/rffi.py
+++ b/rpython/rtyper/lltypesystem/rffi.py
@@ -315,10 +315,10 @@
source = py.code.Source(r"""
def wrapper(%(args)s): # no *args - no GIL for mallocing the tuple
token = 0
+ rjbuf = llop.stm_rewind_jmp_frame(llmemory.Address, 1)
if aroundstate is not None:
if aroundstate.enter_callback is not None:
- token = aroundstate.enter_callback()
- llop.stm_rewind_jmp_frame(lltype.Void, 1)
+ token = aroundstate.enter_callback(rjbuf)
else:
after = aroundstate.after
if after is not None:
@@ -339,8 +339,7 @@
stackcounter.stacks_counter -= 1
if aroundstate is not None:
if aroundstate.leave_callback is not None:
- llop.stm_rewind_jmp_frame(lltype.Void, 2)
- aroundstate.leave_callback(token)
+ aroundstate.leave_callback(rjbuf, token)
else:
before = aroundstate.before
if before is not None:
@@ -355,13 +354,16 @@
miniglobals['os'] = os
miniglobals['we_are_translated'] = we_are_translated
miniglobals['stackcounter'] = stackcounter
+ miniglobals['llmemory'] = llmemory
exec source.compile() in miniglobals
return miniglobals['wrapper']
_make_wrapper_for._annspecialcase_ = 'specialize:memo'
AroundFnPtr = lltype.Ptr(lltype.FuncType([], lltype.Void))
-EnterCallbackFnPtr = lltype.Ptr(lltype.FuncType([], lltype.Signed))
-LeaveCallbackFnPtr = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void))
+EnterCallbackFnPtr = lltype.Ptr(lltype.FuncType([llmemory.Address],
+ lltype.Signed))
+LeaveCallbackFnPtr = lltype.Ptr(lltype.FuncType([llmemory.Address,
+ lltype.Signed], lltype.Void))
class AroundState:
_alloc_flavor_ = "raw"
diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py
--- a/rpython/translator/stm/breakfinder.py
+++ b/rpython/translator/stm/breakfinder.py
@@ -4,8 +4,7 @@
TRANSACTION_BREAK = set([
'stm_commit_if_not_atomic',
- 'stm_start_inevitable_if_not_atomic',
- #'stm_perform_transaction',
+ 'stm_start_if_not_atomic',
#'stm_partial_commit_and_resume_other_threads', # new priv_revision
#'jit_assembler_call',
#'jit_stm_transaction_break_point',
diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py
--- a/rpython/translator/stm/funcgen.py
+++ b/rpython/translator/stm/funcgen.py
@@ -156,16 +156,18 @@
def stm_commit_if_not_atomic(funcgen, op):
return 'pypy_stm_commit_if_not_atomic();'
-def stm_start_inevitable_if_not_atomic(funcgen, op):
- return 'pypy_stm_start_inevitable_if_not_atomic();'
+def stm_start_if_not_atomic(funcgen, op):
+ return 'pypy_stm_start_if_not_atomic();'
def stm_enter_callback_call(funcgen, op):
+ arg0 = funcgen.expr(op.args[0])
result = funcgen.expr(op.result)
- return '%s = pypy_stm_enter_callback_call();' % (result,)
+ return '%s = pypy_stm_enter_callback_call(%s);' % (result, arg0)
def stm_leave_callback_call(funcgen, op):
arg0 = funcgen.expr(op.args[0])
- return 'pypy_stm_leave_callback_call(%s);' % (arg0,)
+ arg1 = funcgen.expr(op.args[1])
+ return 'pypy_stm_leave_callback_call(%s, %s);' % (arg0, arg1)
def stm_should_break_transaction(funcgen, op):
result = funcgen.expr(op.result)
@@ -259,8 +261,10 @@
def stm_rewind_jmp_frame(funcgen, op):
if len(op.args) == 0:
+ assert op.result.concretetype is lltype.Void
return '/* automatic stm_rewind_jmp_frame */'
elif op.args[0].value == 1:
- return 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);'
+ assert op.result.concretetype is llmemory.Address
+ return '%s = &rjbuf1;' % (funcgen.expr(op.result),)
else:
- return 'stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf1);'
+ assert False, op.args[0].value
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c
--- a/rpython/translator/stm/src_stm/stmgcintf.c
+++ b/rpython/translator/stm/src_stm/stmgcintf.c
@@ -89,41 +89,45 @@
/* stm_teardown() not called here for now; it's mostly for tests */
}
-long pypy_stm_enter_callback_call(void)
+long pypy_stm_enter_callback_call(void *rjbuf)
{
if (pypy_stm_ready_atomic == 0) {
/* first time we see this thread */
assert(pypy_transaction_length >= 0);
int e = errno;
pypy_stm_register_thread_local();
+ stm_rewind_jmp_enterframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf);
errno = e;
pypy_stm_ready_atomic = 1;
- pypy_stm_start_inevitable_if_not_atomic();
+ pypy_stm_start_if_not_atomic();
return 1;
}
else {
/* callback from C code, itself called from Python code */
- pypy_stm_start_inevitable_if_not_atomic();
+ stm_rewind_jmp_enterframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf);
+ pypy_stm_start_if_not_atomic();
return 0;
}
}
-void pypy_stm_leave_callback_call(long token)
+void pypy_stm_leave_callback_call(void *rjbuf, long token)
{
+ int e = errno;
if (token == 1) {
/* if we're returning into foreign C code that was not itself
called from Python code, then we're ignoring the atomic
status and committing anyway. */
- int e = errno;
pypy_stm_ready_atomic = 1;
stm_commit_transaction();
pypy_stm_ready_atomic = 0;
+ stm_rewind_jmp_leaveframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf);
pypy_stm_unregister_thread_local();
- errno = e;
}
else {
pypy_stm_commit_if_not_atomic();
+ stm_rewind_jmp_leaveframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf);
}
+ errno = e;
}
void _pypy_stm_initialize_nursery_low_fill_mark(long v_counter)
diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h
--- a/rpython/translator/stm/src_stm/stmgcintf.h
+++ b/rpython/translator/stm/src_stm/stmgcintf.h
@@ -53,6 +53,15 @@
}
errno = e;
}
+static inline void pypy_stm_start_if_not_atomic(void) {
+ if (pypy_stm_ready_atomic == 1) {
+ int e = errno;
+ stm_start_transaction(&stm_thread_local);
+ _pypy_stm_initialize_nursery_low_fill_mark(0);
+ _pypy_stm_inev_state();
+ errno = e;
+ }
+}
static inline void pypy_stm_start_inevitable_if_not_atomic(void) {
if (pypy_stm_ready_atomic == 1) {
int e = errno;
@@ -89,8 +98,8 @@
static inline long pypy_stm_get_atomic(void) {
return pypy_stm_ready_atomic - 1;
}
-long pypy_stm_enter_callback_call(void);
-void pypy_stm_leave_callback_call(long);
+long pypy_stm_enter_callback_call(void *);
+void pypy_stm_leave_callback_call(void *, long);
void pypy_stm_set_transaction_length(double);
void pypy_stm_transaction_break(void);
diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py
--- a/rpython/translator/stm/test/test_ztranslated.py
+++ b/rpython/translator/stm/test/test_ztranslated.py
@@ -73,7 +73,7 @@
rthread.start_new_thread(threadfn, ())
while glob.seen is None:
llop.stm_commit_if_not_atomic(lltype.Void)
- llop.stm_start_inevitable_if_not_atomic(lltype.Void)
+ llop.stm_start_if_not_atomic(lltype.Void)
return glob.seen.value
#
t, cbuilder = self.compile(entry_point)
@@ -470,7 +470,7 @@
lst[42] = 43
lst2[999] = lst
llop.stm_commit_if_not_atomic(lltype.Void)
- llop.stm_start_inevitable_if_not_atomic(lltype.Void)
+ llop.stm_start_if_not_atomic(lltype.Void)
print 'did not crash', lst2[999][42]
return 0
From noreply at buildbot.pypy.org Sun Aug 17 19:35:28 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Sun, 17 Aug 2014 19:35:28 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: merge py3.3
Message-ID: <20140817173528.69E911C347F@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes2
Changeset: r72842:bb19e3e737b0
Date: 2014-08-17 15:00 +0200
http://bitbucket.org/pypy/pypy/changeset/bb19e3e737b0/
Log: merge py3.3
diff too long, truncating to 2000 out of 11112 lines
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
#
-__version__ = '2.2.4.dev2'
+__version__ = '2.5.2'
diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_argcomplete.py
@@ -0,0 +1,104 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code.
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn"
+instead of the default "dirname ":
+
+ optparser.add_argument(Config._file_or_dir, nargs='*'
+ ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+ # PYTHON_ARGCOMPLETE_OK
+so the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+ # PYTHON_ARGCOMPLETE_OK
+ near the top of the main python entry point
+- include in the file calling parse_args():
+ from _argcomplete import try_argcomplete, filescompleter
+ , call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+ completers):
+ export _ARC_DEBUG=1
+- run:
+ python-argcomplete-check-easy-install-script $(which appname)
+ echo $?
+ will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+ _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+ which should throw a KeyError: 'COMPLINE' (which is properly set by the
+ global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+ 'Fast file completer class'
+ def __init__(self, directories=True):
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ """only called on non option completions"""
+ if os.path.sep in prefix[1:]: #
+ prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+ else:
+ prefix_dir = 0
+ completion = []
+ globbed = []
+ if '*' not in prefix and '?' not in prefix:
+ if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash
+ globbed.extend(glob(prefix + '.*'))
+ prefix += '*'
+ globbed.extend(glob(prefix))
+ for x in sorted(globbed):
+ if os.path.isdir(x):
+ x += '/'
+ # append stripping the prefix (like bash, not like compgen)
+ completion.append(x[prefix_dir:])
+ return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+ # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format
+ if sys.version_info[:2] < (2, 6):
+ sys.exit(1)
+ try:
+ import argcomplete.completers
+ except ImportError:
+ sys.exit(-1)
+ filescompleter = FastFilesCompleter()
+
+ def try_argcomplete(parser):
+ argcomplete.autocomplete(parser)
+else:
+ def try_argcomplete(parser): pass
+ filescompleter = None
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -3,7 +3,6 @@
"""
import py
import sys
-import pytest
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
@@ -19,8 +18,8 @@
to provide assert expression information. """)
group.addoption('--no-assert', action="store_true", default=False,
dest="noassert", help="DEPRECATED equivalent to --assert=plain")
- group.addoption('--nomagic', action="store_true", default=False,
- dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
+ group.addoption('--nomagic', '--no-magic', action="store_true",
+ default=False, help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
@@ -35,22 +34,25 @@
mode = "plain"
if mode == "rewrite":
try:
- import ast
+ import ast # noqa
except ImportError:
mode = "reinterp"
else:
- if sys.platform.startswith('java'):
+ # Both Jython and CPython 2.6.0 have AST bugs that make the
+ # assertion rewriting hook malfunction.
+ if (sys.platform.startswith('java') or
+ sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
- reinterpret.AssertionError)
+ reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
- hook = rewrite.AssertionRewritingHook()
- sys.meta_path.append(hook)
+ hook = rewrite.AssertionRewritingHook() # noqa
+ sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
@@ -73,9 +75,16 @@
def callbinrepr(op, left, right):
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
+
for new_expl in hook_result:
if new_expl:
- res = '\n~'.join(new_expl)
+ # Don't include pageloads of data unless we are very
+ # verbose (-vv)
+ if (sum(len(p) for p in new_expl[1:]) > 80*8
+ and item.config.option.verbose < 2):
+ new_expl[1:] = [py.builtin._totext(
+ 'Detailed information truncated, use "-vv" to show')]
+ res = py.builtin._totext('\n~').join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
# The result will be fed back a python % formatting
# operation, which will fail if there are extraneous
@@ -95,9 +104,9 @@
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
- from _pytest.assertion import reinterpret
+ from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
- from _pytest.assertion import rewrite
+ from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -11,7 +11,7 @@
from _pytest.assertion.reinterpret import BuiltinAssertionError
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+if sys.platform.startswith("java"):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -526,10 +526,13 @@
# example:
def f():
return 5
+
def g():
return 3
+
def h(x):
return 'never'
+
check("f() * g() == 5")
check("not f()")
check("not (f() and g() or 0)")
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,18 +1,26 @@
import sys
import py
from _pytest.assertion.util import BuiltinAssertionError
+u = py.builtin._totext
+
class AssertionError(BuiltinAssertionError):
def __init__(self, *args):
BuiltinAssertionError.__init__(self, *args)
if args:
+ # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+ # on Python2.7 and above we always get len(args) == 1
+ # with args[0] being the (x,y) tuple.
+ if len(args) > 1:
+ toprint = args
+ else:
+ toprint = args[0]
try:
- self.msg = str(args[0])
- except py.builtin._sysex:
- raise
- except:
- self.msg = "<[broken __repr__] %s at %0xd>" %(
- args[0].__class__, id(args[0]))
+ self.msg = u(toprint)
+ except Exception:
+ self.msg = u(
+ "<[broken __repr__] %s at %0xd>"
+ % (toprint.__class__, id(toprint)))
else:
f = py.code.Frame(sys._getframe(1))
try:
@@ -44,4 +52,3 @@
from _pytest.assertion.newinterpret import interpret as reinterpret
else:
reinterpret = reinterpret_old
-
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -6,6 +6,7 @@
import imp
import marshal
import os
+import re
import struct
import sys
import types
@@ -14,13 +15,7 @@
from _pytest.assertion import util
-# Windows gives ENOENT in places *nix gives ENOTDIR.
-if sys.platform.startswith("win"):
- PATH_COMPONENT_NOT_DIR = errno.ENOENT
-else:
- PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
-
-# py.test caches rewritten pycs in __pycache__.
+# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
@@ -34,17 +29,19 @@
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
-PYC_EXT = ".py" + "c" if __debug__ else "o"
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertionRewritingHook(object):
- """Import hook which rewrites asserts."""
+ """PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.session = None
self.modules = {}
+ self._register_with_pkg_resources()
def set_session(self, session):
self.fnpats = session.config.getini("python_files")
@@ -59,8 +56,12 @@
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
- if path is not None and len(path) == 1:
- pth = path[0]
+ if path is not None:
+ # Starting with Python 3.3, path is a _NamespacePath(), which
+ # causes problems if not converted to list.
+ path = list(path)
+ if len(path) == 1:
+ pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
@@ -95,12 +96,13 @@
finally:
self.session = sess
else:
- state.trace("matched test file (was specified on cmdline): %r" % (fn,))
+ state.trace("matched test file (was specified on cmdline): %r" %
+ (fn,))
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
- # concurrent py.test processes rewriting and loading pycs. To avoid
+ # concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
@@ -116,19 +118,19 @@
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
- elif e == PATH_COMPONENT_NOT_DIR:
+ elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e == errno.EACCES:
- state.trace("read only directory: %r" % (fn_pypath.dirname,))
+ state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
- # Notice that even if we're in a read-only directory, I'm going to check
- # for a cached pyc. This may not be optimal...
+ # Notice that even if we're in a read-only directory, I'm going
+ # to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
state.trace("rewriting %r" % (fn,))
@@ -153,27 +155,59 @@
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
+ mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
del sys.modules[name]
raise
return sys.modules[name]
-def _write_pyc(co, source_path, pyc):
- # Technically, we don't have to have the same pyc format as (C)Python, since
- # these "pycs" should never be seen by builtin import. However, there's
- # little reason deviate, and I hope sometime to be able to use
- # imp.load_compiled to load them. (See the comment in load_module above.)
+
+
+ def is_package(self, name):
+ try:
+ fd, fn, desc = imp.find_module(name)
+ except ImportError:
+ return False
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ return tp == imp.PKG_DIRECTORY
+
+ @classmethod
+ def _register_with_pkg_resources(cls):
+ """
+ Ensure package resources can be loaded from this loader. May be called
+ multiple times, as the operation is idempotent.
+ """
+ try:
+ import pkg_resources
+ # access an attribute in case a deferred importer is present
+ pkg_resources.__name__
+ except ImportError:
+ return
+
+ # Since pytest tests are always located in the file system, the
+ # DefaultProvider is appropriate.
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+
+def _write_pyc(state, co, source_path, pyc):
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+ # import. However, there's little reason deviate, and I hope
+ # sometime to be able to use imp.load_compiled to load them. (See
+ # the comment in load_module above.)
mtime = int(source_path.mtime())
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
- if err == PATH_COMPONENT_NOT_DIR:
- # This happens when we get a EEXIST in find_module creating the
- # __pycache__ directory and __pycache__ is by some non-dir node.
- return False
- raise
+ state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, __pycache__ being a
+ # file etc.
+ return False
try:
fp.write(imp.get_magic())
fp.write(struct.pack(">",
- ast.Add : "+",
- ast.Sub : "-",
- ast.Mult : "*",
- ast.Div : "/",
- ast.FloorDiv : "//",
- ast.Mod : "%",
- ast.Eq : "==",
- ast.NotEq : "!=",
- ast.Lt : "<",
- ast.LtE : "<=",
- ast.Gt : ">",
- ast.GtE : ">=",
- ast.Pow : "**",
- ast.Is : "is",
- ast.IsNot : "is not",
- ast.In : "in",
- ast.NotIn : "not in"
+ ast.BitOr: "|",
+ ast.BitXor: "^",
+ ast.BitAnd: "&",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.FloorDiv: "//",
+ ast.Mod: "%%", # escaped for string formatting
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.Lt: "<",
+ ast.LtE: "<=",
+ ast.Gt: ">",
+ ast.GtE: ">=",
+ ast.Pow: "**",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in"
}
@@ -341,7 +408,7 @@
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
- isinstance(item.value, ast.Str)):
+ isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
@@ -462,7 +529,8 @@
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
- variables = [ast.Name(name, ast.Store()) for name in self.variables]
+ variables = [ast.Name(name, ast.Store())
+ for name in self.variables]
clear = ast.Assign(variables, ast.Name("None", ast.Load()))
self.statements.append(clear)
# Fix line numbers.
@@ -471,11 +539,12 @@
return self.statements
def visit_Name(self, name):
- # Check if the name is local or not.
+ # Display the repr of the name if it's a local variable or
+ # _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [], None, None)
- globs = ast.Call(self.builtin("globals"), [], [], None, None)
- ops = [ast.In(), ast.IsNot()]
- test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+ inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+ dorepr = self.helper("should_repr_global_name", name)
+ test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
@@ -492,7 +561,8 @@
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
- self.on_failure.append(ast.If(cond, fail_inner, []))
+ # cond is set in a prior loop iteration below
+ self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
@@ -548,7 +618,8 @@
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+ new_call = ast.Call(new_func, new_args, new_kwargs,
+ new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
@@ -584,7 +655,7 @@
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
- # Use py.code._reprcompare if that's available.
+ # Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -1,8 +1,13 @@
"""Utilities for assertion debugging"""
import py
+try:
+ from collections import Sequence
+except ImportError:
+ Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
@@ -10,6 +15,7 @@
# DebugInterpreter.
_reprcompare = None
+
def format_explanation(explanation):
"""This formats an explanation
@@ -20,7 +26,18 @@
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
- # simplify 'assert False where False = ...'
+ explanation = _collapse_false(explanation)
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+ """Collapse expansions of False
+
+ So this strips out any "assert False\n{where False = ...\n}"
+ blocks.
+ """
where = 0
while True:
start = where = explanation.find("False\n{False = ", where)
@@ -42,28 +59,48 @@
explanation = (explanation[:start] + explanation[start+15:end-1] +
explanation[end+1:])
where -= 17
- raw_lines = (explanation or '').split('\n')
- # escape newlines not followed by {, } and ~
+ return explanation
+
+
+def _split_explanation(explanation):
+ """Return a list of individual lines in the explanation
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
+ return lines
+
+def _format_lines(lines):
+ """Format the individual lines
+
+ This will replace the '{', '}' and '~' characters of our mini
+ formatting language with the proper 'where ...', 'and ...' and ' +
+ ...' text, taking care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
- s = 'and '
+ s = u('and ')
else:
- s = 'where '
+ s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
- result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
@@ -71,9 +108,9 @@
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
- result.append(' '*len(stack) + line[1:])
+ result.append(u(' ')*len(stack) + line[1:])
assert len(stack) == 1
- return '\n'.join(result)
+ return result
# Provide basestring in python3
@@ -83,132 +120,163 @@
basestring = str
-def assertrepr_compare(op, left, right):
- """return specialised explanations for some operators/operands"""
- width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+def assertrepr_compare(config, op, left, right):
+ """Return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width/2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
- summary = '%s %s %s' % (left_repr, op, right_repr)
+ summary = u('%s %s %s') % (left_repr, op, right_repr)
- issequence = lambda x: isinstance(x, (list, tuple))
+ issequence = lambda x: (isinstance(x, (list, tuple, Sequence))
+ and not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
- isset = lambda x: isinstance(x, set)
+ isset = lambda x: isinstance(x, (set, frozenset))
+ verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
- explanation = _diff_text(left, right)
+ explanation = _diff_text(left, right, verbose)
elif issequence(left) and issequence(right):
- explanation = _compare_eq_sequence(left, right)
+ explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
- explanation = _compare_eq_set(left, right)
+ explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
- explanation = _diff_text(py.std.pprint.pformat(left),
- py.std.pprint.pformat(right))
+ explanation = _compare_eq_dict(left, right, verbose)
elif op == 'not in':
if istext(left) and istext(right):
- explanation = _notin_text(left, right)
- except py.builtin._sysex:
- raise
- except:
+ explanation = _notin_text(left, right, verbose)
+ except Exception:
excinfo = py.code.ExceptionInfo()
- explanation = ['(pytest_assertion plugin: representation of '
- 'details failed. Probably an object has a faulty __repr__.)',
- str(excinfo)
- ]
-
+ explanation = [
+ u('(pytest_assertion plugin: representation of details failed. '
+ 'Probably an object has a faulty __repr__.)'),
+ u(excinfo)]
if not explanation:
return None
- # Don't include pageloads of data, should be configurable
- if len(''.join(explanation)) > 80*8:
- explanation = ['Detailed information too verbose, truncated']
-
return [summary] + explanation
-def _diff_text(left, right):
- """Return the explanation for the diff between text
+def _diff_text(left, right, verbose=False):
+ """Return the explanation for the diff between text or bytes
- This will skip leading and trailing characters which are
- identical to keep the diff minimal.
+ Unless --verbose is used this will skip leading and trailing
+ characters which are identical to keep the diff minimal.
+
+ If the input are bytes they will be safely converted to text.
"""
explanation = []
- i = 0 # just in case left or right has zero length
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation = ['Skipping %s identical '
- 'leading characters in diff' % i]
- left = left[i:]
- right = right[i:]
- if len(left) == len(right):
- for i in range(len(left)):
- if left[-i] != right[-i]:
+ if isinstance(left, py.builtin.bytes):
+ left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+ if isinstance(right, py.builtin.bytes):
+ right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+ if not verbose:
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
break
if i > 42:
- i -= 10 # Provide some context
- explanation += ['Skipping %s identical '
- 'trailing characters in diff' % i]
- left = left[:-i]
- right = right[:-i]
+ i -= 10 # Provide some context
+ explanation = [u('Skipping %s identical leading '
+ 'characters in diff, use -v to show') % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += [u('Skipping %s identical trailing '
+ 'characters in diff, use -v to show') % i]
+ left = left[:-i]
+ right = right[:-i]
explanation += [line.strip('\n')
for line in py.std.difflib.ndiff(left.splitlines(),
right.splitlines())]
return explanation
-def _compare_eq_sequence(left, right):
+def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
- explanation += ['At index %s diff: %r != %r' %
- (i, left[i], right[i])]
+ explanation += [u('At index %s diff: %r != %r')
+ % (i, left[i], right[i])]
break
if len(left) > len(right):
- explanation += ['Left contains more items, '
- 'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+ explanation += [u('Left contains more items, first extra item: %s')
+ % py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
- explanation += ['Right contains more items, '
- 'first extra item: %s' % py.io.saferepr(right[len(left)],)]
- return explanation # + _diff_text(py.std.pprint.pformat(left),
- # py.std.pprint.pformat(right))
+ explanation += [
+ u('Right contains more items, first extra item: %s') %
+ py.io.saferepr(right[len(left)],)]
+ return explanation # + _diff_text(py.std.pprint.pformat(left),
+ # py.std.pprint.pformat(right))
-def _compare_eq_set(left, right):
+def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
- explanation.append('Extra items in the left set:')
+ explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
- explanation.append('Extra items in the right set:')
+ explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
-def _notin_text(term, text):
+def _compare_eq_dict(left, right, verbose=False):
+ explanation = []
+ common = set(left).intersection(set(right))
+ same = dict((k, left[k]) for k in common if left[k] == right[k])
+ if same and not verbose:
+ explanation += [u('Omitting %s identical items, use -v to show') %
+ len(same)]
+ elif same:
+ explanation += [u('Common items:')]
+ explanation += py.std.pprint.pformat(same).splitlines()
+ diff = set(k for k in common if left[k] != right[k])
+ if diff:
+ explanation += [u('Differing items:')]
+ for k in diff:
+ explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+ py.io.saferepr({k: right[k]})]
+ extra_left = set(left) - set(right)
+ if extra_left:
+ explanation.append(u('Left contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, left[k]) for k in extra_left)).splitlines())
+ extra_right = set(right) - set(left)
+ if extra_right:
+ explanation.append(u('Right contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, right[k]) for k in extra_right)).splitlines())
+ return explanation
+
+
+def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
- diff = _diff_text(correct_text, text)
- newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+ diff = _diff_text(correct_text, text, verbose)
+ newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
- if line.startswith('Skipping'):
+ if line.startswith(u('Skipping')):
continue
- if line.startswith('- '):
+ if line.startswith(u('- ')):
continue
- if line.startswith('+ '):
- newdiff.append(' ' + line[2:])
+ if line.startswith(u('+ ')):
+ newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -1,43 +1,114 @@
-""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """
+"""
+ per-test stdout/stderr capturing mechanisms,
+ ``capsys`` and ``capfd`` function arguments.
+"""
+# note: py.io capture was copied from
+# pylib 1.4.20.dev2 (rev 13d9af95547e)
+import sys
+import os
+import tempfile
-import pytest, py
-import os
+import py
+import pytest
+
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ class BytesIO(StringIO):
+ def write(self, data):
+ if isinstance(data, unicode):
+ raise TypeError("not a byte value: %r" % (data,))
+ StringIO.write(self, data)
+
+if sys.version_info < (3, 0):
+ class TextIO(StringIO):
+ def write(self, data):
+ if not isinstance(data, unicode):
+ enc = getattr(self, '_encoding', 'UTF-8')
+ data = unicode(data, enc, 'replace')
+ StringIO.write(self, data)
+else:
+ TextIO = StringIO
+
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
def pytest_addoption(parser):
group = parser.getgroup("general")
- group._addoption('--capture', action="store", default=None,
- metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+ group._addoption(
+ '--capture', action="store", default=None,
+ metavar="method", choices=['fd', 'sys', 'no'],
help="per-test capturing method: one of fd (default)|sys|no.")
- group._addoption('-s', action="store_const", const="no", dest="capture",
+ group._addoption(
+ '-s', action="store_const", const="no", dest="capture",
help="shortcut for --capture=no.")
+
@pytest.mark.tryfirst
-def pytest_cmdline_parse(pluginmanager, args):
- # we want to perform capturing already for plugin/conftest loading
- if '-s' in args or "--capture=no" in args:
- method = "no"
- elif hasattr(os, 'dup') and '--capture=sys' not in args:
+def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
+ ns = parser.parse_known_args(args)
+ method = ns.capture
+ if not method:
method = "fd"
- else:
+ if method == "fd" and not hasattr(os, "dup"):
method = "sys"
capman = CaptureManager(method)
- pluginmanager.register(capman, "capturemanager")
+ early_config.pluginmanager.register(capman, "capturemanager")
+
+ # make sure that capturemanager is properly reset at final shutdown
+ def teardown():
+ try:
+ capman.reset_capturings()
+ except ValueError:
+ pass
+
+ early_config.pluginmanager.add_shutdown(teardown)
+
+ # make sure logging does not raise exceptions at the end
+ def silence_logging_at_shutdown():
+ if "logging" in sys.modules:
+ sys.modules["logging"].raiseExceptions = False
+ early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown)
+
+ # finally trigger conftest loading but while capturing (issue93)
+ capman.resumecapture()
+ try:
+ try:
+ return __multicall__.execute()
+ finally:
+ out, err = capman.suspendcapture()
+ except:
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+ raise
+
def addouterr(rep, outerr):
for secname, content in zip(["out", "err"], outerr):
if content:
rep.sections.append(("Captured std%s" % secname, content))
+
class NoCapture:
def startall(self):
pass
+
def resume(self):
pass
+
def reset(self):
pass
+
def suspend(self):
return "", ""
+
class CaptureManager:
def __init__(self, defaultmethod=None):
self._method2capture = {}
@@ -45,21 +116,23 @@
def _maketempfile(self):
f = py.std.tempfile.TemporaryFile()
- newf = py.io.dupfile(f, encoding="UTF-8")
+ newf = dupfile(f, encoding="UTF-8")
f.close()
return newf
def _makestringio(self):
- return py.io.TextIO()
+ return TextIO()
def _getcapture(self, method):
if method == "fd":
- return py.io.StdCaptureFD(now=False,
- out=self._maketempfile(), err=self._maketempfile()
+ return StdCaptureFD(
+ out=self._maketempfile(),
+ err=self._maketempfile(),
)
elif method == "sys":
- return py.io.StdCapture(now=False,
- out=self._makestringio(), err=self._makestringio()
+ return StdCapture(
+ out=self._makestringio(),
+ err=self._makestringio(),
)
elif method == "no":
return NoCapture()
@@ -74,23 +147,24 @@
method = config._conftest.rget("option_capture", path=fspath)
except KeyError:
method = "fd"
- if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
+ if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
method = "sys"
return method
def reset_capturings(self):
- for name, cap in self._method2capture.items():
+ for cap in self._method2capture.values():
cap.reset()
def resumecapture_item(self, item):
method = self._getmethod(item.config, item.fspath)
if not hasattr(item, 'outerr'):
- item.outerr = ('', '') # we accumulate outerr on the item
+ item.outerr = ('', '') # we accumulate outerr on the item
return self.resumecapture(method)
def resumecapture(self, method=None):
if hasattr(self, '_capturing'):
- raise ValueError("cannot resume, already capturing with %r" %
+ raise ValueError(
+ "cannot resume, already capturing with %r" %
(self._capturing,))
if method is None:
method = self._defaultmethod
@@ -119,30 +193,29 @@
return "", ""
def activate_funcargs(self, pyfuncitem):
- if not hasattr(pyfuncitem, 'funcargs'):
- return
- assert not hasattr(self, '_capturing_funcargs')
- self._capturing_funcargs = capturing_funcargs = []
- for name, capfuncarg in pyfuncitem.funcargs.items():
- if name in ('capsys', 'capfd'):
- capturing_funcargs.append(capfuncarg)
- capfuncarg._start()
+ funcargs = getattr(pyfuncitem, "funcargs", None)
+ if funcargs is not None:
+ for name, capfuncarg in funcargs.items():
+ if name in ('capsys', 'capfd'):
+ assert not hasattr(self, '_capturing_funcarg')
+ self._capturing_funcarg = capfuncarg
+ capfuncarg._start()
def deactivate_funcargs(self):
- capturing_funcargs = getattr(self, '_capturing_funcargs', None)
- if capturing_funcargs is not None:
- while capturing_funcargs:
- capfuncarg = capturing_funcargs.pop()
- capfuncarg._finalize()
- del self._capturing_funcargs
+ capturing_funcarg = getattr(self, '_capturing_funcarg', None)
+ if capturing_funcarg:
+ outerr = capturing_funcarg._finalize()
+ del self._capturing_funcarg
+ return outerr
def pytest_make_collect_report(self, __multicall__, collector):
method = self._getmethod(collector.config, collector.fspath)
try:
self.resumecapture(method)
except ValueError:
- return # recursive collect, XXX refactor capturing
- # to allow for more lightweight recursive capturing
+ # recursive collect, XXX refactor capturing
+ # to allow for more lightweight recursive capturing
+ return
try:
rep = __multicall__.execute()
finally:
@@ -169,46 +242,371 @@
@pytest.mark.tryfirst
def pytest_runtest_makereport(self, __multicall__, item, call):
- self.deactivate_funcargs()
+ funcarg_outerr = self.deactivate_funcargs()
rep = __multicall__.execute()
outerr = self.suspendcapture(item)
- if not rep.passed:
- addouterr(rep, outerr)
+ if funcarg_outerr is not None:
+ outerr = (outerr[0] + funcarg_outerr[0],
+ outerr[1] + funcarg_outerr[1])
+ addouterr(rep, outerr)
if not rep.passed or rep.when == "teardown":
outerr = ('', '')
item.outerr = outerr
return rep
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
def pytest_funcarg__capsys(request):
"""enables capturing of writes to sys.stdout/sys.stderr and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
- return CaptureFuncarg(py.io.StdCapture)
+ if "capfd" in request._funcargs:
+ raise request.raiseerror(error_capsysfderror)
+ return CaptureFixture(StdCapture)
+
def pytest_funcarg__capfd(request):
"""enables capturing of writes to file descriptors 1 and 2 and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
+ if "capsys" in request._funcargs:
+ request.raiseerror(error_capsysfderror)
if not hasattr(os, 'dup'):
- py.test.skip("capfd funcarg needs os.dup")
- return CaptureFuncarg(py.io.StdCaptureFD)
+ pytest.skip("capfd funcarg needs os.dup")
+ return CaptureFixture(StdCaptureFD)
-class CaptureFuncarg:
+
+class CaptureFixture:
def __init__(self, captureclass):
- self.capture = captureclass(now=False)
+ self._capture = captureclass()
def _start(self):
- self.capture.startall()
+ self._capture.startall()
def _finalize(self):
- if hasattr(self, 'capture'):
- self.capture.reset()
- del self.capture
+ if hasattr(self, '_capture'):
+ outerr = self._outerr = self._capture.reset()
+ del self._capture
+ return outerr
def readouterr(self):
- return self.capture.readouterr()
+ try:
+ return self._capture.readouterr()
+ except AttributeError:
+ return self._outerr
def close(self):
self._finalize()
+
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None, patchsys=False):
+ """ save targetfd descriptor, and open a new
+ temporary file there. If no tmpfile is
+ specified a tempfile.Tempfile() will be opened
+ in text mode.
+ """
+ self.targetfd = targetfd
+ if tmpfile is None and targetfd != 0:
+ f = tempfile.TemporaryFile('wb+')
+ tmpfile = dupfile(f, encoding="UTF-8")
+ f.close()
+ self.tmpfile = tmpfile
+ self._savefd = os.dup(self.targetfd)
+ if patchsys:
+ self._oldsys = getattr(sys, patchsysdict[targetfd])
+
+ def start(self):
+ try:
+ os.fstat(self._savefd)
+ except OSError:
+ raise ValueError(
+ "saved filedescriptor not valid, "
+ "did you call start() twice?")
+ if self.targetfd == 0 and not self.tmpfile:
+ fd = os.open(os.devnull, os.O_RDONLY)
+ os.dup2(fd, 0)
+ os.close(fd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+ else:
+ os.dup2(self.tmpfile.fileno(), self.targetfd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+ def done(self):
+ """ unpatch and clean up, returns the self.tmpfile (file object)
+ """
+ os.dup2(self._savefd, self.targetfd)
+ os.close(self._savefd)
+ if self.targetfd != 0:
+ self.tmpfile.seek(0)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+ return self.tmpfile
+
+ def writeorg(self, data):
+ """ write a string to the original file descriptor
+ """
+ tempfp = tempfile.TemporaryFile()
+ try:
+ os.dup2(self._savefd, tempfp.fileno())
+ tempfp.write(data)
+ finally:
+ tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+ """ return a new open file object that's a duplicate of f
+
+ mode is duplicated if not given, 'buffering' controls
+ buffer size (defaulting to no buffering) and 'raising'
+ defines whether an exception is raised when an incompatible
+ file object is passed in (if raising is False, the file
+ object itself will be returned)
+ """
+ try:
+ fd = f.fileno()
+ mode = mode or f.mode
+ except AttributeError:
+ if raising:
+ raise
+ return f
+ newfd = os.dup(fd)
+ if sys.version_info >= (3, 0):
+ if encoding is not None:
+ mode = mode.replace("b", "")
+ buffering = True
+ return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+ else:
+ f = os.fdopen(newfd, mode, buffering)
+ if encoding is not None:
+ return EncodedFile(f, encoding)
+ return f
+
+
+class EncodedFile(object):
+ def __init__(self, _stream, encoding):
+ self._stream = _stream
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding)
+ self._stream.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+
+class Capture(object):
+ def reset(self):
+ """ reset sys.stdout/stderr and return captured output as strings. """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already reset")
+ self._reset = True
+ outfile, errfile = self.done(save=False)
+ out, err = "", ""
+ if outfile and not outfile.closed:
+ out = outfile.read()
+ outfile.close()
+ if errfile and errfile != outfile and not errfile.closed:
+ err = errfile.read()
+ errfile.close()
+ return out, err
+
+ def suspend(self):
+ """ return current snapshot captures, memorize tempfiles. """
+ outerr = self.readouterr()
+ outfile, errfile = self.done()
+ return outerr
+
+
+class StdCaptureFD(Capture):
+ """ This class allows to capture writes to FD1 and FD2
+ and may connect a NULL file to FD0 (and prevent
+ reads from sys.stdin). If any of the 0,1,2 file descriptors
+ is invalid it will not be captured.
+ """
+ def __init__(self, out=True, err=True, in_=True, patchsys=True):
+ self._options = {
+ "out": out,
+ "err": err,
+ "in_": in_,
+ "patchsys": patchsys,
+ }
+ self._save()
+
+ def _save(self):
+ in_ = self._options['in_']
+ out = self._options['out']
+ err = self._options['err']
+ patchsys = self._options['patchsys']
+ if in_:
+ try:
+ self.in_ = FDCapture(
+ 0, tmpfile=None,
+ patchsys=patchsys)
+ except OSError:
+ pass
+ if out:
+ tmpfile = None
+ if hasattr(out, 'write'):
+ tmpfile = out
+ try:
+ self.out = FDCapture(
+ 1, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['out'] = self.out.tmpfile
+ except OSError:
+ pass
+ if err:
+ if hasattr(err, 'write'):
+ tmpfile = err
+ else:
+ tmpfile = None
+ try:
+ self.err = FDCapture(
+ 2, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['err'] = self.err.tmpfile
+ except OSError:
+ pass
+
+ def startall(self):
+ if hasattr(self, 'in_'):
+ self.in_.start()
+ if hasattr(self, 'out'):
+ self.out.start()
+ if hasattr(self, 'err'):
+ self.err.start()
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if hasattr(self, 'out') and not self.out.tmpfile.closed:
+ outfile = self.out.done()
+ if hasattr(self, 'err') and not self.err.tmpfile.closed:
+ errfile = self.err.done()
+ if hasattr(self, 'in_'):
+ self.in_.done()
+ if save:
+ self._save()
+ return outfile, errfile
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = self._readsnapshot('out')
+ err = self._readsnapshot('err')
+ return out, err
+
+ def _readsnapshot(self, name):
+ if hasattr(self, name):
+ f = getattr(self, name).tmpfile
+ else:
+ return ''
+
+ f.seek(0)
+ res = f.read()
+ enc = getattr(f, "encoding", None)
+ if enc:
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+
+class StdCapture(Capture):
+ """ This class allows to capture writes to sys.stdout|stderr "in-memory"
+ and will raise errors on tries to read from sys.stdin. It only
+ modifies sys.stdout|stderr|stdin attributes and does not
+ touch underlying File Descriptors (use StdCaptureFD for that).
+ """
+ def __init__(self, out=True, err=True, in_=True):
+ self._oldout = sys.stdout
+ self._olderr = sys.stderr
+ self._oldin = sys.stdin
+ if out and not hasattr(out, 'file'):
+ out = TextIO()
+ self.out = out
+ if err:
+ if not hasattr(err, 'write'):
+ err = TextIO()
+ self.err = err
+ self.in_ = in_
+
+ def startall(self):
+ if self.out:
+ sys.stdout = self.out
+ if self.err:
+ sys.stderr = self.err
+ if self.in_:
+ sys.stdin = self.in_ = DontReadFromInput()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if self.out and not self.out.closed:
+ sys.stdout = self._oldout
+ outfile = self.out
+ outfile.seek(0)
+ if self.err and not self.err.closed:
+ sys.stderr = self._olderr
+ errfile = self.err
+ errfile.seek(0)
+ if self.in_:
+ sys.stdin = self._oldin
+ return outfile, errfile
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = err = ""
+ if self.out:
+ out = self.out.getvalue()
+ self.out.truncate(0)
+ self.out.seek(0)
+ if self.err:
+ err = self.err.getvalue()
+ self.err.truncate(0)
+ self.err.seek(0)
+ return out, err
+
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+
+ def isatty(self):
+ return False
+
+ def close(self):
+ pass
diff --git a/_pytest/config.py b/_pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -1,25 +1,91 @@
""" command line options, ini-file and conftest.py processing. """
import py
+# DON'T import pytest here because it causes import cycle troubles
import sys, os
+from _pytest import hookspec # the extension point definitions
from _pytest.core import PluginManager
-import pytest
-def pytest_cmdline_parse(pluginmanager, args):
- config = Config(pluginmanager)
- config.parse(args)
- return config
+# pytest startup
-def pytest_unconfigure(config):
- while 1:
- try:
- fin = config._cleanup.pop()
- except IndexError:
- break
- fin()
+def main(args=None, plugins=None):
+ """ return exit code, after performing an in-process test run.
+
+ :arg args: list of command line arguments.
+
+ :arg plugins: list of plugin objects to be auto-registered during
+ initialization.
+ """
+ config = _prepareconfig(args, plugins)
+ return config.hook.pytest_cmdline_main(config=config)
+
+class cmdline: # compatibility namespace
+ main = staticmethod(main)
+
+class UsageError(Exception):
+ """ error in pytest usage or invocation"""
+
+_preinit = []
+
+default_plugins = (
+ "mark main terminal runner python pdb unittest capture skipping "
+ "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
+ "junitxml resultlog doctest").split()
+
+def _preloadplugins():
+ assert not _preinit
+ _preinit.append(get_plugin_manager())
+
+def get_plugin_manager():
+ if _preinit:
+ return _preinit.pop(0)
+ # subsequent calls to main will create a fresh instance
+ pluginmanager = PytestPluginManager()
+ pluginmanager.config = Config(pluginmanager) # XXX attr needed?
+ for spec in default_plugins:
+ pluginmanager.import_plugin(spec)
+ return pluginmanager
+
+def _prepareconfig(args=None, plugins=None):
+ if args is None:
+ args = sys.argv[1:]
+ elif isinstance(args, py.path.local):
+ args = [str(args)]
+ elif not isinstance(args, (tuple, list)):
+ if not isinstance(args, str):
+ raise ValueError("not a string or argument list: %r" % (args,))
+ args = py.std.shlex.split(args)
+ pluginmanager = get_plugin_manager()
+ if plugins:
+ for plugin in plugins:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+
+class PytestPluginManager(PluginManager):
+ def __init__(self, hookspecs=[hookspec]):
+ super(PytestPluginManager, self).__init__(hookspecs=hookspecs)
+ self.register(self)
+ if os.environ.get('PYTEST_DEBUG'):
+ err = sys.stderr
+ encoding = getattr(err, 'encoding', 'utf8')
+ try:
+ err = py.io.dupfile(err, encoding=encoding)
+ except Exception:
+ pass
+ self.trace.root.setwriter(err.write)
+
+ def pytest_configure(self, config):
+ config.addinivalue_line("markers",
+ "tryfirst: mark a hook implementation function such that the "
+ "plugin machinery will try to call it first/as early as possible.")
+ config.addinivalue_line("markers",
+ "trylast: mark a hook implementation function such that the "
+ "plugin machinery will try to call it last/as late as possible.")
+
class Parser:
- """ Parser for command line arguments. """
+ """ Parser for command line arguments and ini-file values. """
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
@@ -35,15 +101,17 @@
if option.dest:
self._processopt(option)
- def addnote(self, note):
- self._notes.append(note)
-
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
- :name: unique name of the option group.
+ :name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
+
+ The returned group object has an ``addoption`` method with the same
+ signature as :py:func:`parser.addoption
+ <_pytest.config.Parser.addoption>` but will be shown in the
+        respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
@@ -57,33 +125,222 @@
return group
def addoption(self, *opts, **attrs):
- """ add an optparse-style option. """
+ """ register a command line option.
+
+ :opts: option names, can be short or long options.
+ :attrs: same attributes which the ``add_option()`` function of the
+ `argparse library
+ `_
+ accepts.
+
+ After command line parsing options are available on the pytest config
+ object via ``config.option.NAME`` where ``NAME`` is usually set
+ by passing a ``dest`` attribute, for example
+ ``addoption("--long", dest="NAME", ...)``.
+ """
self._anonymous.addoption(*opts, **attrs)
def parse(self, args):
- self.optparser = optparser = MyOptionParser(self)
+ from _pytest._argcomplete import try_argcomplete
+ self.optparser = self._getparser()
+ try_argcomplete(self.optparser)
+ return self.optparser.parse_args([str(x) for x in args])
+
+ def _getparser(self):
+ from _pytest._argcomplete import filescompleter
+ optparser = MyOptionParser(self)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
- optgroup = py.std.optparse.OptionGroup(optparser, desc)
- optgroup.add_options(group.options)
- optparser.add_option_group(optgroup)
- return self.optparser.parse_args([str(x) for x in args])
+ arggroup = optparser.add_argument_group(desc)
+ for option in group.options:
+ n = option.names()
+ a = option.attrs()
+ arggroup.add_argument(*n, **a)
+ # bash like autocompletion for dirs (appending '/')
+ optparser.add_argument(FILE_OR_DIR, nargs='*'
+ ).completer=filescompleter
+ return optparser
def parse_setoption(self, args, option):
- parsedoption, args = self.parse(args)
+ parsedoption = self.parse(args)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
- return args
+ return getattr(parsedoption, FILE_OR_DIR)
+
+ def parse_known_args(self, args):
+ optparser = self._getparser()
+ args = [str(x) for x in args]
+ return optparser.parse_known_args(args)[0]
def addini(self, name, help, type=None, default=None):
- """ add an ini-file option with the given name and description. """
+ """ register an ini-file option.
+
+ :name: name of the ini-variable
+ :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``.
+ :default: default value if no ini-file option exists but is queried.
+
+ The value of ini-variables can be retrieved via a call to
+ :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
+ """
assert type in (None, "pathlist", "args", "linelist")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
+class ArgumentError(Exception):
+ """
+ Raised if an Argument instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+
+class Argument:
+ """class that mimics the necessary behaviour of py.std.optparse.Option """
+ _typ_map = {
+ 'int': int,
+ 'string': str,
+ }
+ # enable after some grace period for plugin writers
+ TYPE_WARN = False
+
+ def __init__(self, *names, **attrs):
+ """store parms in private vars for use in add_argument"""
+ self._attrs = attrs
+ self._short_opts = []
+ self._long_opts = []
+ self.dest = attrs.get('dest')
+ if self.TYPE_WARN:
+ try:
+ help = attrs['help']
+ if '%default' in help:
+ py.std.warnings.warn(
+ 'pytest now uses argparse. "%default" should be'
+ ' changed to "%(default)s" ',
+ FutureWarning,
+ stacklevel=3)
+ except KeyError:
+ pass
+ try:
+ typ = attrs['type']
+ except KeyError:
+ pass
+ else:
+ # this might raise a keyerror as well, don't want to catch that
+ if isinstance(typ, py.builtin._basestring):
+ if typ == 'choice':
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this is optional and when supplied '
+ ' should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ # argparse expects a type here take it from
+ # the type of the first element
+ attrs['type'] = type(attrs['choices'][0])
+ else:
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ attrs['type'] = Argument._typ_map[typ]
+ # used in test_parseopt -> test_parse_defaultgetter
+ self.type = attrs['type']
+ else:
+ self.type = typ
+ try:
+ # attribute existence is tested in Config._processopt
+ self.default = attrs['default']
+ except KeyError:
+ pass
+ self._set_opt_strings(names)
+ if not self.dest:
+ if self._long_opts:
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ try:
+ self.dest = self._short_opts[0][1:]
+ except IndexError:
+ raise ArgumentError(
+ 'need a long or short option', self)
+
+ def names(self):
+ return self._short_opts + self._long_opts
+
+ def attrs(self):
+ # update any attributes set by processopt
+ attrs = 'default dest help'.split()
+ if self.dest:
+ attrs.append(self.dest)
+ for attr in attrs:
+ try:
+ self._attrs[attr] = getattr(self, attr)
+ except AttributeError:
+ pass
+ if self._attrs.get('help'):
+ a = self._attrs['help']
+ a = a.replace('%default', '%(default)s')
+ #a = a.replace('%prog', '%(prog)s')
+ self._attrs['help'] = a
+ return self._attrs
+
+ def _set_opt_strings(self, opts):
+ """directly from optparse
+
+ might not be necessary as this is passed to argparse later on"""
+ for opt in opts:
+ if len(opt) < 2:
+ raise ArgumentError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise ArgumentError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise ArgumentError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def __repr__(self):
+ retval = 'Argument('
+ if self._short_opts:
+ retval += '_short_opts: ' + repr(self._short_opts) + ', '
+ if self._long_opts:
+ retval += '_long_opts: ' + repr(self._long_opts) + ', '
+ retval += 'dest: ' + repr(self.dest) + ', '
+ if hasattr(self, 'type'):
+ retval += 'type: ' + repr(self.type) + ', '
+ if hasattr(self, 'default'):
+ retval += 'default: ' + repr(self.default) + ', '
+ if retval[-2:] == ', ': # always long enough to test ("Argument(" )
+ retval = retval[:-2]
+ retval += ')'
+ return retval
+
+
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
@@ -92,12 +349,18 @@
self.parser = parser
def addoption(self, *optnames, **attrs):
- """ add an option to this group. """
- option = py.std.optparse.Option(*optnames, **attrs)
+ """ add an option to this group.
+
+ if a shortened version of a long option is specified it will
+ be suppressed in the help. addoption('--twowords', '--two-words')
+ results in help showing '--two-words' only, but --twowords gets
+ accepted **and** the automatic destination is in args.twowords
+ """
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
- option = py.std.optparse.Option(*optnames, **attrs)
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
From noreply at buildbot.pypy.org Sun Aug 17 19:35:29 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Sun, 17 Aug 2014 19:35:29 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: implement flush kwarg for print()
Message-ID: <20140817173529.A0F851C347F@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes2
Changeset: r72843:c92b637d6a83
Date: 2014-08-17 15:01 +0200
http://bitbucket.org/pypy/pypy/changeset/c92b637d6a83/
Log: implement flush kwarg for print()
diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py
--- a/pypy/module/__builtin__/app_io.py
+++ b/pypy/module/__builtin__/app_io.py
@@ -57,13 +57,14 @@
return line
def print_(*args, **kwargs):
- r"""print(value, ..., sep=' ', end='\n', file=sys.stdout)
+ r"""print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
- file: a file-like object (stream); defaults to the current sys.stdout.
- sep: string inserted between values, default a space.
- end: string appended after the last value, default a newline.
+ file: a file-like object (stream); defaults to the current sys.stdout.
+ sep: string inserted between values, default a space.
+ end: string appended after the last value, default a newline.
+ flush: whether to forcibly flush the stream.
"""
fp = kwargs.pop("file", None)
if fp is None:
@@ -80,6 +81,7 @@
if end is not None:
if not isinstance(end, str):
raise TypeError("end must be None or a string")
+ flush = kwargs.pop('flush', None)
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if sep is None:
@@ -91,3 +93,5 @@
write(sep)
write(arg)
write(end)
+ if flush:
+ fp.flush()
diff --git a/pypy/module/__builtin__/test/test_print.py b/pypy/module/__builtin__/test/test_print.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__builtin__/test/test_print.py
@@ -0,0 +1,29 @@
+class AppTestPrint:
+
+ def test_print_flush(self):
+ """
+ # operation of the flush flag
+ class filelike():
+ def __init__(self):
+ self.written = ''
+ self.flushed = 0
+ def write(self, str):
+ self.written += str
+ def flush(self):
+ self.flushed += 1
+
+ f = filelike()
+ print(1, file=f, end='', flush=True)
+ print(2, file=f, end='', flush=True)
+ print(3, file=f, flush=False)
+ assert f.written == '123\\n'
+ assert f.flushed == 2
+
+ # ensure exceptions from flush are passed through
+ class noflush():
+ def write(self, str):
+ pass
+ def flush(self):
+ raise RuntimeError
+ raises(RuntimeError, print, 1, file=noflush(), flush=True)
+ """
From noreply at buildbot.pypy.org Sun Aug 17 19:35:30 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Sun, 17 Aug 2014 19:35:30 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2
(pull request #269)
Message-ID: <20140817173530.CD8471C347F@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72844:eda8876b5645
Date: 2014-08-17 10:34 -0700
http://bitbucket.org/pypy/pypy/changeset/eda8876b5645/
Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #269)
py3.3: implement flush kwarg for print()
diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py
--- a/pypy/module/__builtin__/app_io.py
+++ b/pypy/module/__builtin__/app_io.py
@@ -57,13 +57,14 @@
return line
def print_(*args, **kwargs):
- r"""print(value, ..., sep=' ', end='\n', file=sys.stdout)
+ r"""print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
- file: a file-like object (stream); defaults to the current sys.stdout.
- sep: string inserted between values, default a space.
- end: string appended after the last value, default a newline.
+ file: a file-like object (stream); defaults to the current sys.stdout.
+ sep: string inserted between values, default a space.
+ end: string appended after the last value, default a newline.
+ flush: whether to forcibly flush the stream.
"""
fp = kwargs.pop("file", None)
if fp is None:
@@ -80,6 +81,7 @@
if end is not None:
if not isinstance(end, str):
raise TypeError("end must be None or a string")
+ flush = kwargs.pop('flush', None)
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if sep is None:
@@ -91,3 +93,5 @@
write(sep)
write(arg)
write(end)
+ if flush:
+ fp.flush()
diff --git a/pypy/module/__builtin__/test/test_print.py b/pypy/module/__builtin__/test/test_print.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__builtin__/test/test_print.py
@@ -0,0 +1,29 @@
+class AppTestPrint:
+
+ def test_print_flush(self):
+ """
+ # operation of the flush flag
+ class filelike():
+ def __init__(self):
+ self.written = ''
+ self.flushed = 0
+ def write(self, str):
+ self.written += str
+ def flush(self):
+ self.flushed += 1
+
+ f = filelike()
+ print(1, file=f, end='', flush=True)
+ print(2, file=f, end='', flush=True)
+ print(3, file=f, flush=False)
+ assert f.written == '123\\n'
+ assert f.flushed == 2
+
+ # ensure exceptions from flush are passed through
+ class noflush():
+ def write(self, str):
+ pass
+ def flush(self):
+ raise RuntimeError
+ raises(RuntimeError, print, 1, file=noflush(), flush=True)
+ """
From noreply at buildbot.pypy.org Sun Aug 17 19:36:57 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:36:57 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Document the kind of trace
produced by jit_stm_xxx(), before refactoring it.
Message-ID: <20140817173657.42E9E1C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72845:35c9220c71f4
Date: 2014-08-17 17:38 +0200
http://bitbucket.org/pypy/pypy/changeset/35c9220c71f4/
Log: Document the kind of trace produced by jit_stm_xxx(), before
refactoring it.
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -53,6 +53,26 @@
if llop.stm_should_break_transaction(lltype.Bool):
llop.stm_transaction_break(lltype.Void)
+# Typical usage of the following two functions:
+#
+# just after jit_merge_point:
+# if rstm.jit_stm_should_break_transaction(False):
+# rstm.jit_stm_transaction_break_point()
+#
+# just before can_enter_jit:
+# if rstm.jit_stm_should_break_transaction(True):
+# rstm.jit_stm_transaction_break_point()
+#
+# resulting JIT trace (common case):
+# ...
+# call_release_gil(...)
+# stm_transaction_break(0) # in-line, because we expect "inevitable"
+# guard_not_forced()
+# ...
+# i1 = stm_should_break_transaction()
+# guard_false(i1) # out-of-line, because rarely needed
+# jump()
+#
def jit_stm_transaction_break_point():
# XXX REFACTOR AWAY
if we_are_translated():
From noreply at buildbot.pypy.org Sun Aug 17 19:36:58 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:36:58 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: In-progress: kill stuff from
the JIT
Message-ID: <20140817173658.932A61C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72846:02f43fae7c8c
Date: 2014-08-17 18:17 +0200
http://bitbucket.org/pypy/pypy/changeset/02f43fae7c8c/
Log: In-progress: kill stuff from the JIT
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -56,9 +56,6 @@
pypyjitdriver.jit_merge_point(ec=ec,
frame=self, next_instr=next_instr, pycode=pycode,
is_being_profiled=self.is_being_profiled)
- if self.space.threadlocals.threads_running: # quasi-immutable field
- if rstm.jit_stm_should_break_transaction(False):
- rstm.jit_stm_transaction_break_point()
co_code = pycode.co_code
self.valuestackdepth = hint(self.valuestackdepth, promote=True)
@@ -89,8 +86,7 @@
ec.bytecode_trace(self, decr_by)
jumpto = r_uint(self.last_instr)
if self.space.threadlocals.threads_running: # quasi-immutable field
- if rstm.jit_stm_should_break_transaction(True):
- rstm.jit_stm_transaction_break_point()
+ rstm.possible_transaction_break()
#
pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto,
pycode=self.getcode(),
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1418,20 +1418,9 @@
[v], None))
return ops
- def rewrite_op_jit_stm_should_break_transaction(self, op):
- assert isinstance(op.args[0], Constant)
-
- arg = int(op.args[0].value)
- c_arg = Constant(arg, lltype.Signed)
+ def rewrite_op_stm_rewind_jmp_frame(self, op):
+ return []
- return [SpaceOperation('stm_should_break_transaction',
- [c_arg], op.result),
- SpaceOperation('-live-', [], None),]
-
- def rewrite_op_jit_stm_transaction_break_point(self, op):
- return [SpaceOperation('stm_transaction_break', [], op.result),
- SpaceOperation('-live-', [], None),]
-
def rewrite_op_jit_marker(self, op):
key = op.args[0].value
jitdriver = op.args[1].value
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -929,15 +929,18 @@
assert block.operations[1].result is None
assert block.exits[0].args == [v1]
-def test_jit_stm_transaction_break_point():
- py.test.skip("XXX?")
- op = SpaceOperation('jit_stm_transaction_break_point',
- [Constant(1, lltype.Signed)], lltype.Void)
+def test_stm_should_break_transaction():
+ op = SpaceOperation('stm_should_break_transaction', [], lltype.Bool)
tr = Transformer()
op2 = tr.rewrite_operation(op)
- assert op2.opname == 'stm_transaction_break'
- assert op2.args[0].value == 1
-
+ assert op2.opname == 'stm_should_break_transaction'
+
+def test_stm_rewind_jmp_frame():
+ op = SpaceOperation('stm_rewind_jmp_frame', [], lltype.Void)
+ tr = Transformer()
+ op2 = tr.rewrite_operation(op)
+ assert op2 == []
+
def test_jit_merge_point_1():
class FakeJitDriverSD:
index = 42
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -908,18 +908,15 @@
return False
- @arguments("i", returns="i")
- def bhimpl_stm_should_break_transaction(if_there_is_no_other):
- return False
-
-
- @arguments()
- def bhimpl_stm_transaction_break():
- pass
+ @arguments(returns="i")
+ def bhimpl_stm_should_break_transaction():
+ from rpython.rlib import rstm
+ return rstm.should_break_transaction()
@arguments()
def bhimpl_stm_hint_commit_soon():
- pass
+ from rpython.rlib import rstm
+ rstm.hint_commit_soon()
# ----------
diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py
--- a/rpython/jit/metainterp/heapcache.py
+++ b/rpython/jit/metainterp/heapcache.py
@@ -51,13 +51,6 @@
self.input_indirections = {}
self.output_indirections = {}
-
- # to do some of the work of optimizeopt/stm.py, we have a similar
- # logic here:
- self.stm_break_wanted = True
-
-
-
def _input_indirection(self, box):
return self.input_indirections.get(box, box)
@@ -137,11 +130,8 @@
opnum == rop.SETFIELD_RAW or
opnum == rop.SETARRAYITEM_RAW or
opnum == rop.SETINTERIORFIELD_RAW or
- opnum == rop.RAW_STORE):
- return
- if opnum in (rop.GUARD_NOT_FORCED, rop.GUARD_NOT_FORCED_2,
- rop.STM_HINT_COMMIT_SOON):
- self.stm_break_wanted = True
+ opnum == rop.RAW_STORE or
+ opnum == rop.STM_HINT_COMMIT_SOON):
return
if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or
rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or
@@ -207,7 +197,6 @@
del cache[frombox]
return
else:
- self.stm_break_wanted = True
# Only invalidate things that are either escaped or arguments
for descr, boxes in self.heap_cache.iteritems():
for box in boxes.keys():
@@ -226,8 +215,6 @@
# above, but hit an assertion in "pypy test_multiprocessing.py".
self.reset(reset_virtuals=False, trace_branch=False)
- self.stm_break_wanted = True
-
def is_class_known(self, box):
return box in self.known_class_boxes
@@ -338,6 +325,3 @@
def replace_box(self, oldbox, newbox):
self.input_indirections[self._output_indirection(newbox)] = self._input_indirection(oldbox)
self.output_indirections[self._input_indirection(oldbox)] = self._output_indirection(newbox)
-
- def stm_break_done(self):
- self.stm_break_wanted = False
diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
--- a/rpython/jit/metainterp/history.py
+++ b/rpython/jit/metainterp/history.py
@@ -627,7 +627,6 @@
operations = None
call_pure_results = None
stm_info = None
- is_really_loop = False
logops = None
quasi_immutable_deps = None
diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py
--- a/rpython/jit/metainterp/optimizeopt/__init__.py
+++ b/rpython/jit/metainterp/optimizeopt/__init__.py
@@ -8,7 +8,6 @@
from rpython.jit.metainterp.optimizeopt.simplify import OptSimplify
from rpython.jit.metainterp.optimizeopt.pure import OptPure
from rpython.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce
-from rpython.jit.metainterp.optimizeopt.stm import OptSTM
from rpython.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.debug import debug_start, debug_stop, debug_print
@@ -35,9 +34,6 @@
def build_opt_chain(metainterp_sd, enable_opts):
optimizations = []
unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict
- if metainterp_sd.config.translation.stm:
- optimizations.append(OptSTM())
-
for name, opt in unroll_all_opts:
if name in enable_opts:
if opt is not None:
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -275,14 +275,12 @@
opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array
opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array
return
- if (opnum == rop.STM_TRANSACTION_BREAK or
- opnum == rop.CALL_ASSEMBLER):
- self._seen_guard_not_invalidated = False
if (opnum == rop.CALL or
opnum == rop.CALL_PURE or
opnum == rop.COND_CALL or
opnum == rop.CALL_MAY_FORCE or
- opnum == rop.CALL_RELEASE_GIL):
+ opnum == rop.CALL_RELEASE_GIL or
+ opnum == rop.CALL_ASSEMBLER):
if opnum == rop.CALL_ASSEMBLER:
self._seen_guard_not_invalidated = False
else:
diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py
deleted file mode 100644
--- a/rpython/jit/metainterp/optimizeopt/stm.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, )
-from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
-from rpython.jit.codewriter.effectinfo import EffectInfo
-from rpython.jit.metainterp.resoperation import rop, ResOperation
-
-class OptSTM(Optimization):
- """
-    This step removes a lot of unnecessary transaction_breaks (TBs)
- emitted by pyjitpl from traces. We only want to keep these
- unconditional TBs after external calls (identified by GUARD_NOT_FORCED)
- because they are likely to return as inevitable transactions which
- we want to break ASAP.
- Guarded TBs are left in place, as they represent app-level loops
- and are likely points to break between atomic transactions.
-
- The cached_ops is here to remove the virtualizable-forcing added
- by pyjitpl before unconditional TBs. See tests.
- """
- def __init__(self):
- self.remove_next_gnf = False # guard_not_forced
- self.keep_but_ignore_gnf = False
- self.cached_ops = []
-
- def propagate_forward(self, op):
- dispatch_opt(self, op)
-
- def flush_cached(self):
- while self.cached_ops:
- self.emit_operation(self.cached_ops.pop(0))
-
- def flush(self):
- # just in case. it shouldn't be necessary
- self.flush_cached()
-
- def default_emit(self, op):
- self.flush_cached()
- self.emit_operation(op)
-
- def _break_wanted(self):
- is_loop = self.optimizer.loop.is_really_loop
- return self.optimizer.stm_info.get('break_wanted', is_loop)
-
- def _set_break_wanted(self, val):
- self.optimizer.stm_info['break_wanted'] = val
-
- def optimize_FORCE_TOKEN(self, op):
- # if we have cached stuff, flush it. Not our case
- self.flush_cached()
- self.cached_ops.append(op)
-
- def optimize_SETFIELD_GC(self, op):
- if not self.cached_ops:
- # setfield not for force_token
- self.emit_operation(op)
- else:
- assert len(self.cached_ops) == 1
- assert self.cached_ops[0].getopnum() == rop.FORCE_TOKEN
- self.cached_ops.append(op)
-
- def optimize_STM_SHOULD_BREAK_TRANSACTION(self, op):
- self.flush_cached()
- self._set_break_wanted(False)
- self.emit_operation(op)
-
- def optimize_STM_TRANSACTION_BREAK(self, op):
- assert not self.remove_next_gnf
- really_wanted = op.getarg(0).getint()
- if really_wanted or self._break_wanted():
- self.flush_cached()
- self._set_break_wanted(False)
- self.emit_operation(op)
- self.keep_but_ignore_gnf = True
- else:
- self.cached_ops = []
- self.remove_next_gnf = True
-
- def optimize_GUARD_NOT_FORCED(self, op):
- self.flush_cached()
- if self.remove_next_gnf:
- self.remove_next_gnf = False
- else:
- if not self.keep_but_ignore_gnf:
- self._set_break_wanted(True)
- self.keep_but_ignore_gnf = False
- self.emit_operation(op)
-
- def optimize_STM_HINT_COMMIT_SOON(self, op):
- self.flush_cached()
- self._set_break_wanted(True)
- self.emit_operation(op)
-
-
-dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_',
- default=OptSTM.default_emit)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py
deleted file mode 100644
--- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py
+++ /dev/null
@@ -1,331 +0,0 @@
-from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import (
- BaseTestWithUnroll,)
-from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin
-from rpython.jit.codewriter.effectinfo import EffectInfo
-from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory
-
-
-class TestSTM(BaseTestWithUnroll, LLtypeMixin):
- stm = True
-
- namespace = LLtypeMixin.namespace.copy()
- namespace.update(locals())
-
-
- def test_unrolled_loop(self):
- ops = """
- []
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- self.optimize_loop(ops, ops, expected_preamble=ops)
-
- def test_really_wanted_tb(self):
- ops = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- stm_transaction_break(1)
- guard_not_forced() []
-
- jump()
- """
- preamble = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- stm_transaction_break(1)
- guard_not_forced() []
-
- jump()
- """
- expected = """
- []
- stm_transaction_break(1)
- guard_not_forced() []
-
- jump()
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
-
-
- def test_unrolled_loop2(self):
- ops = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
-
- jump()
- """
- preamble = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
-
- jump()
- """
- expected = """
- []
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
-
- def test_not_disable_opt(self):
- ops = """
- [p1]
- i1 = getfield_gc(p1, descr=adescr)
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump(p1)
- """
- preamble = """
- [p1]
- i1 = getfield_gc(p1, descr=adescr)
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump(p1)
- """
- expected = """
- [p1]
- i0 = stm_should_break_transaction()
- guard_false(i0) []
-
- jump(p1)
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
-
- def test_dont_remove_first_tb(self):
- ops = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
- stm_transaction_break(0)
- guard_not_forced() []
- stm_transaction_break(0)
- guard_not_forced() []
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- preamble = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- expected = """
- []
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
-
- def test_add_tb_after_guard_not_forced(self):
- ops = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- escape() # e.g. like a call_release_gil
- guard_not_forced() []
-
- stm_transaction_break(0)
- guard_not_forced() []
- stm_transaction_break(0)
- guard_not_forced() []
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- preamble = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- escape()
- guard_not_forced() []
-
- stm_transaction_break(0)
- guard_not_forced() []
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- expected = """
- []
- escape()
- guard_not_forced() []
-
- stm_transaction_break(0)
- guard_not_forced() []
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
-
- def test_remove_force_token(self):
- ops = """
- [p0]
- p1 = force_token()
- setfield_gc(p0, p1, descr=adescr)
- stm_transaction_break(0)
- guard_not_forced() []
-
- p2 = force_token()
- setfield_gc(p0, p2, descr=adescr)
- stm_transaction_break(0)
- guard_not_forced() []
-
- p3 = force_token()
- setfield_gc(p0, p3, descr=adescr)
- stm_transaction_break(0)
- guard_not_forced() []
-
- escape()
-
- p4 = force_token()
- setfield_gc(p0, p4, descr=adescr)
- stm_transaction_break(0)
- guard_not_forced() []
-
- p6 = force_token() # not removed!
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump(p0)
- """
- preamble = """
- [p0]
- p1 = force_token()
- setfield_gc(p0, p1, descr=adescr)
- stm_transaction_break(0)
- guard_not_forced() []
-
- escape()
-
- p6 = force_token() # not removed!
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump(p0)
- """
- expected = """
- [p0]
- escape()
-
- p6 = force_token() # not removed!
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump(p0)
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
-
- def test_not_remove_setfield(self):
- ops = """
- [p0, p1]
- setfield_gc(p0, p1, descr=adescr)
- stm_transaction_break(0)
-
- p2 = force_token()
- p3 = force_token()
- jump(p0, p1)
- """
- preamble = """
- [p0, p1]
- setfield_gc(p0, p1, descr=adescr)
- stm_transaction_break(0)
-
- p2 = force_token()
- p3 = force_token()
- jump(p0, p1)
- """
- expected = """
- [p0, p1]
- p2 = force_token()
- p3 = force_token()
-
- setfield_gc(p0, p1, descr=adescr) # moved here by other stuff...
- jump(p0, p1)
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
-
- def test_stm_location_1(self):
- # This tests setfield_gc on a non-virtual. On a virtual, it doesn't
- # really matter, because STM conflicts are impossible anyway
- ops = """
- [i1, p1]
- setfield_gc(p1, i1, descr=adescr) {81}
- call(i1, descr=nonwritedescr) {90}
- jump(i1, p1)
- """
- expected = """
- [i1, p1]
- call(i1, descr=nonwritedescr) {90}
- setfield_gc(p1, i1, descr=adescr) {81}
- jump(i1, p1)
- """
- self.optimize_loop(ops, expected)
-
- def test_add_tb_after_commit_soon(self):
- ops = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- stm_hint_commit_soon()
-
- stm_transaction_break(0)
- guard_not_forced() []
- stm_transaction_break(0)
- guard_not_forced() []
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- preamble = """
- []
- stm_transaction_break(0)
- guard_not_forced() []
-
- stm_hint_commit_soon()
-
- stm_transaction_break(0)
- guard_not_forced() []
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- expected = """
- []
- stm_hint_commit_soon()
-
- stm_transaction_break(0)
- guard_not_forced() []
-
- i0 = stm_should_break_transaction()
- guard_false(i0) []
- jump()
- """
- self.optimize_loop(ops, expected, expected_preamble=preamble)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_util.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py
@@ -1,6 +1,6 @@
import py, random
-from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr, rffi
+from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rffi
from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE
from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE
@@ -331,7 +331,7 @@
def get_name_from_address(self, addr):
# hack
try:
- return "".join(addr.ptr.name)[:-1] # remove \x00
+ return "".join(addr.ptr.name.chars)
except AttributeError:
return ""
diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py
--- a/rpython/jit/metainterp/optimizeopt/unroll.py
+++ b/rpython/jit/metainterp/optimizeopt/unroll.py
@@ -75,7 +75,6 @@
start_label = loop.operations[0]
if start_label.getopnum() == rop.LABEL:
- loop.is_really_loop = True
loop.operations = loop.operations[1:]
# We need to emit the label op before import_state() as emitting it
# will clear heap caches
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -186,52 +186,16 @@
raise AssertionError("bad result box type")
# ------------------------------
- def _record_stm_transaction_break(self, really_wanted):
- # records an unconditional stm_transaction_break
- mi = self.metainterp
- mi.vable_and_vrefs_before_residual_call()
- mi._record_helper_nonpure_varargs(
- rop.STM_TRANSACTION_BREAK, None, None,
- [history.ConstInt(really_wanted)])
- mi.vrefs_after_residual_call()
- mi.vable_after_residual_call()
- #
- if not really_wanted:
-            # we're about to return ConstInt(0), which will go into the
- # jitcode's %iN variable. But it will be captured by the
- # GUARD_NOT_FORCED's resume data too. It is essential that we
- # don't capture the old, stale value! Also, store ConstInt(1)
- # to make sure that upon resuming we'll see a result of 1 (XXX
- # unsure if it's needed, but it shouldn't hurt).
- self.make_result_of_lastop(ConstInt(1))
- #
- mi.generate_guard(rop.GUARD_NOT_FORCED, None)
- self.metainterp.heapcache.stm_break_done()
-
-
- @arguments("int")
- def opimpl_stm_should_break_transaction(self, if_there_is_no_other):
- val = bool(if_there_is_no_other)
- mi = self.metainterp
- if val:
- # app-level loop: only one of these per loop is really needed
- resbox = history.BoxInt(0)
- mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox)
- self.metainterp.heapcache.stm_break_done()
- return resbox
- else:
- # between byte-code instructions: only keep if it is
- # likely that we are inevitable here
- if self.metainterp.heapcache.stm_break_wanted:
- self._record_stm_transaction_break(False)
- return ConstInt(0)
@arguments()
- def opimpl_stm_transaction_break(self):
- # always wanted: inserted after we compile a bridge because there
- # were just too many breaks and we failed the should_break&guard
- # because of that
- self._record_stm_transaction_break(True)
+ def opimpl_stm_should_break_transaction(self):
+ # XXX make it return BoxInt(1) instead of BoxInt(0) if there
+ # is an inevitable transaction, because it's likely that there
+ # will always be an inevitable transaction here
+ resbox = history.BoxInt(0)
+ mi = self.metainterp
+ mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox)
+ return resbox
@arguments()
def opimpl_stm_hint_commit_soon(self):
@@ -1855,8 +1819,6 @@
if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2:
resumedescr = compile.ResumeGuardForcedDescr(self.staticdata,
self.jitdriver_sd)
- # for detecting stm breaks that are needed
- self.heapcache.invalidate_caches(opnum, resumedescr, moreargs)
elif opnum == rop.GUARD_NOT_INVALIDATED:
resumedescr = compile.ResumeGuardNotInvalidated()
elif opnum == rop.GUARD_FUTURE_CONDITION:
diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -521,7 +521,6 @@
'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr
'RECORD_KNOWN_CLASS/2', # [objptr, clsptr]
'KEEPALIVE/1',
- 'STM_TRANSACTION_BREAK/1',
'STM_READ/1',
'_CANRAISE_FIRST', # ----- start of can_raise operations -----
diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py
--- a/rpython/jit/metainterp/test/test_stm.py
+++ b/rpython/jit/metainterp/test/test_stm.py
@@ -11,63 +11,10 @@
class STMTests:
def test_simple(self):
def g():
- return rstm.jit_stm_should_break_transaction(False)
+ return rstm.should_break_transaction()
res = self.interp_operations(g, [], translationoptions={"stm":True})
assert res == False
- self.check_operations_history(stm_transaction_break=1,
- stm_should_break_transaction=0)
-
- def test_not_removed(self):
- import time
- def g():
- time.sleep(0)
- return rstm.jit_stm_should_break_transaction(False)
- res = self.interp_operations(g, [], translationoptions={"stm":True})
- assert res == False
- self.check_operations_history(stm_transaction_break=1,
- call_may_force=1,
- stm_should_break_transaction=0)
-
- def test_not_removed2(self):
- def g():
- return rstm.jit_stm_should_break_transaction(True)
- res = self.interp_operations(g, [], translationoptions={"stm":True})
- assert res == False
- self.check_operations_history(stm_transaction_break=0,
- stm_should_break_transaction=1)
-
- def test_transaction_break(self):
- def g():
- rstm.jit_stm_transaction_break_point()
- return 42
- self.interp_operations(g, [], translationoptions={"stm":True})
- self.check_operations_history({'stm_transaction_break':1,
- 'guard_not_forced':1})
-
- def test_heapcache(self):
- import time
- def g():
- rstm.jit_stm_should_break_transaction(True) # keep (start of loop)
- rstm.jit_stm_should_break_transaction(False)
- time.sleep(0)
- rstm.jit_stm_should_break_transaction(False) # keep (after guard_not_forced)
- rstm.jit_stm_should_break_transaction(False)
- rstm.jit_stm_should_break_transaction(True) # keep (True)
- rstm.jit_stm_should_break_transaction(True) # keep (True)
- rstm.jit_stm_should_break_transaction(False)
- rstm.hint_commit_soon()
- rstm.jit_stm_should_break_transaction(False) # keep
- rstm.jit_stm_should_break_transaction(False)
- return 42
- res = self.interp_operations(g, [], translationoptions={"stm":True})
- assert res == 42
- self.check_operations_history({
- 'stm_transaction_break':2,
- 'stm_hint_commit_soon':1,
- 'stm_should_break_transaction':3,
- 'guard_not_forced':3,
- 'guard_no_exception':1,
- 'call_may_force':1})
+ self.check_operations_history(stm_should_break_transaction=1)
def test_debug_merge_points(self):
myjitdriver = JitDriver(greens = ['a'], reds = ['x', 'res'])
diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -483,12 +483,14 @@
name = 'jitdriver'
inline_jit_merge_point = False
_store_last_enter_jit = None
+ stm_report_location = None
def __init__(self, greens=None, reds=None, virtualizables=None,
get_jitcell_at=None, set_jitcell_at=None,
get_printable_location=None, confirm_enter_jit=None,
can_never_inline=None, should_unroll_one_iteration=None,
- name='jitdriver', check_untranslated=True):
+ name='jitdriver', check_untranslated=True,
+ stm_report_location=None):
if greens is not None:
self.greens = greens
self.name = name
@@ -524,6 +526,8 @@
self.can_never_inline = can_never_inline
self.should_unroll_one_iteration = should_unroll_one_iteration
self.check_untranslated = check_untranslated
+ if stm_report_location is not None:
+ self.stm_report_location = stm_report_location
def _freeze_(self):
return True
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -46,47 +46,13 @@
function with the interpreter's dispatch loop, this must be called
(it turns into a marker in the caller's function). There is one
automatically in any jit.jit_merge_point()."""
- # special-cased below
+ # special-cased below: the emitted operation must be placed
+ # directly in the caller's graph
def possible_transaction_break():
if stm_is_enabled():
if llop.stm_should_break_transaction(lltype.Bool):
- llop.stm_transaction_break(lltype.Void)
-
-# Typical usage of the following two functions:
-#
-# just after jit_merge_point:
-# if rstm.jit_stm_should_break_transaction(False):
-# rstm.jit_stm_transaction_break_point()
-#
-# just before can_enter_jit:
-# if rstm.jit_stm_should_break_transaction(True):
-# rstm.jit_stm_transaction_break_point()
-#
-# resulting JIT trace (common case):
-# ...
-# call_release_gil(...)
-# stm_transaction_break(0) # in-line, because we expect "inevitable"
-# guard_not_forced()
-# ...
-# i1 = stm_should_break_transaction()
-# guard_false(i1) # out-of-line, because rarely needed
-# jump()
-#
-def jit_stm_transaction_break_point():
- # XXX REFACTOR AWAY
- if we_are_translated():
- llop.jit_stm_transaction_break_point(lltype.Void)
-
- at specialize.arg(0)
-def jit_stm_should_break_transaction(if_there_is_no_other):
- # XXX REFACTOR AWAY
- # if_there_is_no_other means that we use this point only
- # if there is no other break point in the trace.
- # If it is False, the point may be used if it comes right
- # after a CALL_RELEASE_GIL
- return llop.jit_stm_should_break_transaction(lltype.Bool,
- if_there_is_no_other)
+ break_transaction()
def hint_commit_soon():
"""As the name says, just a hint. Maybe calling it
@@ -104,7 +70,6 @@
def partial_commit_and_resume_other_threads():
pass # for now
- at dont_look_inside
def should_break_transaction():
return we_are_translated() and (
llop.stm_should_break_transaction(lltype.Bool))
diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py
--- a/rpython/rtyper/llinterp.py
+++ b/rpython/rtyper/llinterp.py
@@ -943,7 +943,6 @@
op_stm_initialize = _stm_not_implemented
op_stm_finalize = _stm_not_implemented
op_stm_perform_transaction = _stm_not_implemented
- op_stm_should_break_transaction = _stm_not_implemented
op_stm_commit_transaction = _stm_not_implemented
op_stm_begin_inevitable_transaction = _stm_not_implemented
op_stm_barrier = _stm_not_implemented
@@ -971,6 +970,9 @@
op_stm_stop_all_other_threads = _stm_not_implemented
op_stm_partial_commit_and_resume_other_threads = _stm_not_implemented
+ def op_stm_should_break_transaction(self):
+ return False
+
def op_threadlocalref_set(self, key, value):
try:
d = self.llinterpreter.tlrefsdict
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -436,7 +436,7 @@
'stm_leave_callback_call': LLOp(),
'stm_transaction_break': LLOp(canmallocgc=True),
'stm_should_break_transaction': LLOp(sideeffects=False),
- 'stm_rewind_jmp_frame': LLOp(),
+ 'stm_rewind_jmp_frame': LLOp(canrun=True),
'stm_set_transaction_length': LLOp(),
'stm_hint_commit_soon': LLOp(canrun=True),
@@ -521,8 +521,6 @@
'jit_assembler_call': LLOp(canrun=True, # similar to an 'indirect_call'
canraise=(Exception,),
canmallocgc=True),
- 'jit_stm_transaction_break_point' : LLOp(canrun=True,canmallocgc=True),
- 'jit_stm_should_break_transaction' : LLOp(canrun=True),
# __________ GC operations __________
diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py
--- a/rpython/rtyper/lltypesystem/opimpl.py
+++ b/rpython/rtyper/lltypesystem/opimpl.py
@@ -715,10 +715,7 @@
def op_jit_assembler_call(funcptr, *args):
return funcptr(*args)
-def op_jit_stm_should_break_transaction(if_there_is_no_other):
- return False
-
-def op_jit_stm_transaction_break_point():
+def op_stm_rewind_jmp_frame():
pass
def op_stm_hint_commit_soon():
diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py
--- a/rpython/translator/stm/breakfinder.py
+++ b/rpython/translator/stm/breakfinder.py
@@ -7,7 +7,6 @@
'stm_start_if_not_atomic',
#'stm_partial_commit_and_resume_other_threads', # new priv_revision
#'jit_assembler_call',
- #'jit_stm_transaction_break_point',
'stm_enter_callback_call',
'stm_leave_callback_call',
])
diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py
--- a/rpython/translator/stm/inevitable.py
+++ b/rpython/translator/stm/inevitable.py
@@ -18,8 +18,7 @@
'gc_adr_of_root_stack_top', 'gc_add_memory_pressure',
'weakref_create', 'weakref_deref',
'jit_assembler_call', 'gc_writebarrier',
- 'shrink_array', 'jit_stm_transaction_break_point',
- 'jit_stm_should_break_transaction',
+ 'shrink_array',
'threadlocalref_get', 'threadlocalref_set',
])
ALWAYS_ALLOW_OPERATIONS |= set(lloperation.enum_tryfold_ops())
From noreply at buildbot.pypy.org Sun Aug 17 19:36:59 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:36:59 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix tlc.
Message-ID: <20140817173659.C06D21C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72847:e9196681b65b
Date: 2014-08-17 18:20 +0200
http://bitbucket.org/pypy/pypy/changeset/e9196681b65b/
Log: Fix tlc.
diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py
--- a/rpython/jit/tl/tlc.py
+++ b/rpython/jit/tl/tlc.py
@@ -229,8 +229,7 @@
def make_interp(supports_call, jitted=True):
myjitdriver = JitDriver(greens = ['pc', 'code'],
- reds = ['frame', 'pool'],
- stm_do_transaction_breaks=True)
+ reds = ['frame', 'pool'])
def interp(code='', pc=0, inputarg=0, pool=None):
if not isinstance(code,str):
@@ -250,9 +249,6 @@
if jitted:
myjitdriver.jit_merge_point(frame=frame,
code=code, pc=pc, pool=pool)
- # nothing inbetween!
- if rstm.jit_stm_should_break_transaction(False):
- rstm.jit_stm_transaction_break_point()
opcode = ord(code[pc])
pc += 1
stack = frame.stack
@@ -353,8 +349,7 @@
pc += char2int(code[pc])
pc += 1
if jitted and old_pc > pc:
- if rstm.jit_stm_should_break_transaction(True):
- rstm.jit_stm_transaction_break_point()
+ rstm.possible_transaction_break()
myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame,
pool=pool)
@@ -364,8 +359,7 @@
old_pc = pc
pc += char2int(code[pc]) + 1
if jitted and old_pc > pc:
- if rstm.jit_stm_should_break_transaction(True):
- rstm.jit_stm_transaction_break_point()
+ rstm.possible_transaction_break()
myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame,
pool=pool)
else:
@@ -377,8 +371,7 @@
old_pc = pc
pc += offset
if jitted and old_pc > pc:
- if rstm.jit_stm_should_break_transaction(True):
- rstm.jit_stm_transaction_break_point()
+ rstm.possible_transaction_break()
myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame,
pool=pool)
From noreply at buildbot.pypy.org Sun Aug 17 19:37:00 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:37:00 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Revert this change
Message-ID: <20140817173700.ECDF11C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72848:778a09fd4f39
Date: 2014-08-17 18:22 +0200
http://bitbucket.org/pypy/pypy/changeset/778a09fd4f39/
Log: Revert this change
diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
--- a/rpython/jit/metainterp/history.py
+++ b/rpython/jit/metainterp/history.py
@@ -759,10 +759,9 @@
# ____________________________________________________________
class History(object):
- def __init__(self, metainterp_sd):
+ def __init__(self):
self.inputargs = None
self.operations = []
- self.config = metainterp_sd.config
self.stm_location = None
def record(self, opnum, argboxes, resbox, descr=None):
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -1849,7 +1849,7 @@
self.framestack[-1].pc = saved_pc
def create_empty_history(self):
- self.history = history.History(self.staticdata)
+ self.history = history.History()
self.staticdata.stats.set_history(self.history)
def _all_constants(self, *boxes):
@@ -2453,7 +2453,7 @@
rstack._stack_criticalcode_start()
try:
self.portal_call_depth = -1 # always one portal around
- self.history = history.History(self.staticdata)
+ self.history = history.History()
inputargs_and_holes = self.rebuild_state_after_failure(resumedescr,
deadframe)
self.history.inputargs = [box for box in inputargs_and_holes if box]
From noreply at buildbot.pypy.org Sun Aug 17 19:37:02 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:37:02 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Oops. Looks like the merge
in 22f849260e70 did that wrong.
Message-ID: <20140817173702.1D18C1C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72849:5034d5a420d3
Date: 2014-08-17 18:29 +0200
http://bitbucket.org/pypy/pypy/changeset/5034d5a420d3/
Log: Oops. Looks like the merge in 22f849260e70 did that wrong.
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -242,7 +242,6 @@
i = 0
while i < length:
copy_item(source, dest, i + source_start, i + dest_start)
- dest[i + dest_start] = source[i + source_start]
i += 1
return
source_addr = llmemory.cast_ptr_to_adr(source)
From noreply at buildbot.pypy.org Sun Aug 17 19:37:03 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:37:03 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Remove outdated test
Message-ID: <20140817173703.3E21E1C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72850:61b69f802758
Date: 2014-08-17 18:30 +0200
http://bitbucket.org/pypy/pypy/changeset/61b69f802758/
Log: Remove outdated test
diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py
--- a/rpython/jit/metainterp/test/test_heapcache.py
+++ b/rpython/jit/metainterp/test/test_heapcache.py
@@ -534,30 +534,6 @@
)
assert h.getarrayitem(box1, index1, descr1) is box3
- def test_stm_break(self):
- h = HeapCache()
- assert h.stm_break_wanted
- h.stm_break_done()
- assert not h.stm_break_wanted
- # loop headers
- h.reset()
- assert h.stm_break_wanted
- h.stm_break_done()
- assert not h.stm_break_wanted
- # call that may make the transaction inevitable
- h.invalidate_caches(
- rop.CALL, FakeCallDescr(FakeEffectinfo.EF_RANDOM_EFFECTS), [box1]
- )
- assert h.stm_break_wanted
- h.stm_break_done()
- # unknown op
- h.invalidate_caches(rop.JIT_DEBUG, None, [box1, lengthbox2, box2])
- assert h.stm_break_wanted
- h.stm_break_done()
- # GUARD_NOT_FORCED
- h.invalidate_caches(rop.GUARD_NOT_FORCED, None, [])
- assert h.stm_break_wanted
-
def test_bug_missing_ignored_operations(self):
h = HeapCache()
h.new(box1)
From noreply at buildbot.pypy.org Sun Aug 17 19:37:04 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:37:04 +0200 (CEST)
Subject: [pypy-commit] pypy default: From stmgc-c7: put the "# Loop" and "#
bridge" header in more dumped
Message-ID: <20140817173704.5E8111C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72851:a134c3074fb6
Date: 2014-08-17 18:41 +0200
http://bitbucket.org/pypy/pypy/changeset/a134c3074fb6/
Log: From stmgc-c7: put the "# Loop" and "# bridge" header in more dumped
logs than just the "jit-log-opt" one.
diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py
--- a/rpython/jit/metainterp/logger.py
+++ b/rpython/jit/metainterp/logger.py
@@ -15,10 +15,14 @@
def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''):
if type is None:
debug_start("jit-log-noopt-loop")
+ debug_print("# Loop", number, '(%s)' % name, ":", "noopt",
+ "with", len(operations), "ops")
logops = self._log_operations(inputargs, operations, ops_offset)
debug_stop("jit-log-noopt-loop")
elif type == "rewritten":
debug_start("jit-log-rewritten-loop")
+ debug_print("# Loop", number, '(%s)' % name, ":", type,
+ "with", len(operations), "ops")
logops = self._log_operations(inputargs, operations, ops_offset)
debug_stop("jit-log-rewritten-loop")
elif number == -2:
@@ -37,12 +41,18 @@
descr=None, ops_offset=None):
if extra == "noopt":
debug_start("jit-log-noopt-bridge")
+ debug_print("# bridge out of Guard",
+ "0x%x" % compute_unique_id(descr),
+ "with", len(operations), "ops")
logops = self._log_operations(inputargs, operations, ops_offset)
debug_stop("jit-log-noopt-bridge")
elif extra == "rewritten":
debug_start("jit-log-rewritten-bridge")
+ debug_print("# bridge out of Guard",
+ "0x%x" % compute_unique_id(descr),
+ "with", len(operations), "ops")
logops = self._log_operations(inputargs, operations, ops_offset)
- debug_stop("jit-log-rewritten-bridge")
+ debug_stop("jit-log-rewritten-bridge")
elif extra == "compiling":
debug_start("jit-log-compiling-bridge")
logops = self._log_operations(inputargs, operations, ops_offset)
diff --git a/rpython/jit/metainterp/test/test_logger.py b/rpython/jit/metainterp/test/test_logger.py
--- a/rpython/jit/metainterp/test/test_logger.py
+++ b/rpython/jit/metainterp/test/test_logger.py
@@ -32,10 +32,11 @@
return log_stream.getvalue()
class Logger(logger.Logger):
- def log_loop(self, loop, namespace={}, ops_offset=None):
+ def log_loop(self, loop, namespace={}, ops_offset=None, name=''):
self.namespace = namespace
return capturing(logger.Logger.log_loop, self,
- loop.inputargs, loop.operations, ops_offset=ops_offset)
+ loop.inputargs, loop.operations, ops_offset=ops_offset,
+ name=name)
def _make_log_operations(self1):
class LogOperations(logger.LogOperations):
@@ -230,8 +231,9 @@
None: 40
}
logger = Logger(self.make_metainterp_sd())
- output = logger.log_loop(loop, ops_offset=ops_offset)
+ output = logger.log_loop(loop, ops_offset=ops_offset, name="foo")
assert output.strip() == """
+# Loop 0 (foo) : noopt with 3 ops
[i0]
+10: i2 = int_add(i0, 1)
i4 = int_mul(i2, 2)
From noreply at buildbot.pypy.org Sun Aug 17 19:37:07 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 19:37:07 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: hg merge default
Message-ID: <20140817173707.714F91C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72852:e10ea4772e19
Date: 2014-08-17 18:45 +0200
http://bitbucket.org/pypy/pypy/changeset/e10ea4772e19/
Log: hg merge default
diff too long, truncating to 2000 out of 14840 lines
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
#
-__version__ = '2.2.4.dev2'
+__version__ = '2.5.2'
diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_argcomplete.py
@@ -0,0 +1,104 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code).
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn"
+instead of the default "dirname ":
+
+ optparser.add_argument(Config._file_or_dir, nargs='*'
+ ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+ # PYTHON_ARGCOMPLETE_OK
+so the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+ # PYTHON_ARGCOMPLETE_OK
+ near the top of the main python entry point
+- include in the file calling parse_args():
+ from _argcomplete import try_argcomplete, filescompleter
+ , call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+ completers):
+ export _ARC_DEBUG=1
+- run:
+ python-argcomplete-check-easy-install-script $(which appname)
+ echo $?
+ will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+ _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+ which should throw a KeyError: 'COMPLINE' (which is properly set by the
+ global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+ 'Fast file completer class'
+ def __init__(self, directories=True):
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ """only called on non option completions"""
+ if os.path.sep in prefix[1:]: #
+ prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+ else:
+ prefix_dir = 0
+ completion = []
+ globbed = []
+ if '*' not in prefix and '?' not in prefix:
+ if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash
+ globbed.extend(glob(prefix + '.*'))
+ prefix += '*'
+ globbed.extend(glob(prefix))
+ for x in sorted(globbed):
+ if os.path.isdir(x):
+ x += '/'
+ # append stripping the prefix (like bash, not like compgen)
+ completion.append(x[prefix_dir:])
+ return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+ # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format
+ if sys.version_info[:2] < (2, 6):
+ sys.exit(1)
+ try:
+ import argcomplete.completers
+ except ImportError:
+ sys.exit(-1)
+ filescompleter = FastFilesCompleter()
+
+ def try_argcomplete(parser):
+ argcomplete.autocomplete(parser)
+else:
+ def try_argcomplete(parser): pass
+ filescompleter = None
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -3,7 +3,6 @@
"""
import py
import sys
-import pytest
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
@@ -19,8 +18,8 @@
to provide assert expression information. """)
group.addoption('--no-assert', action="store_true", default=False,
dest="noassert", help="DEPRECATED equivalent to --assert=plain")
- group.addoption('--nomagic', action="store_true", default=False,
- dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
+ group.addoption('--nomagic', '--no-magic', action="store_true",
+ default=False, help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
@@ -35,22 +34,25 @@
mode = "plain"
if mode == "rewrite":
try:
- import ast
+ import ast # noqa
except ImportError:
mode = "reinterp"
else:
- if sys.platform.startswith('java'):
+ # Both Jython and CPython 2.6.0 have AST bugs that make the
+ # assertion rewriting hook malfunction.
+ if (sys.platform.startswith('java') or
+ sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
- reinterpret.AssertionError)
+ reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
- hook = rewrite.AssertionRewritingHook()
- sys.meta_path.append(hook)
+ hook = rewrite.AssertionRewritingHook() # noqa
+ sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
@@ -73,9 +75,16 @@
def callbinrepr(op, left, right):
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
+
for new_expl in hook_result:
if new_expl:
- res = '\n~'.join(new_expl)
+ # Don't include pageloads of data unless we are very
+ # verbose (-vv)
+ if (sum(len(p) for p in new_expl[1:]) > 80*8
+ and item.config.option.verbose < 2):
+ new_expl[1:] = [py.builtin._totext(
+ 'Detailed information truncated, use "-vv" to show')]
+ res = py.builtin._totext('\n~').join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
# The result will be fed back a python % formatting
# operation, which will fail if there are extraneous
@@ -95,9 +104,9 @@
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
- from _pytest.assertion import reinterpret
+ from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
- from _pytest.assertion import rewrite
+ from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -11,7 +11,7 @@
from _pytest.assertion.reinterpret import BuiltinAssertionError
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+if sys.platform.startswith("java"):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -526,10 +526,13 @@
# example:
def f():
return 5
+
def g():
return 3
+
def h(x):
return 'never'
+
check("f() * g() == 5")
check("not f()")
check("not (f() and g() or 0)")
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,18 +1,26 @@
import sys
import py
from _pytest.assertion.util import BuiltinAssertionError
+u = py.builtin._totext
+
class AssertionError(BuiltinAssertionError):
def __init__(self, *args):
BuiltinAssertionError.__init__(self, *args)
if args:
+ # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+ # on Python2.7 and above we always get len(args) == 1
+ # with args[0] being the (x,y) tuple.
+ if len(args) > 1:
+ toprint = args
+ else:
+ toprint = args[0]
try:
- self.msg = str(args[0])
- except py.builtin._sysex:
- raise
- except:
- self.msg = "<[broken __repr__] %s at %0xd>" %(
- args[0].__class__, id(args[0]))
+ self.msg = u(toprint)
+ except Exception:
+ self.msg = u(
+ "<[broken __repr__] %s at %0xd>"
+ % (toprint.__class__, id(toprint)))
else:
f = py.code.Frame(sys._getframe(1))
try:
@@ -44,4 +52,3 @@
from _pytest.assertion.newinterpret import interpret as reinterpret
else:
reinterpret = reinterpret_old
-
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -6,6 +6,7 @@
import imp
import marshal
import os
+import re
import struct
import sys
import types
@@ -14,13 +15,7 @@
from _pytest.assertion import util
-# Windows gives ENOENT in places *nix gives ENOTDIR.
-if sys.platform.startswith("win"):
- PATH_COMPONENT_NOT_DIR = errno.ENOENT
-else:
- PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
-
-# py.test caches rewritten pycs in __pycache__.
+# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
@@ -34,17 +29,19 @@
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
-PYC_EXT = ".py" + "c" if __debug__ else "o"
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertionRewritingHook(object):
- """Import hook which rewrites asserts."""
+ """PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.session = None
self.modules = {}
+ self._register_with_pkg_resources()
def set_session(self, session):
self.fnpats = session.config.getini("python_files")
@@ -59,8 +56,12 @@
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
- if path is not None and len(path) == 1:
- pth = path[0]
+ if path is not None:
+ # Starting with Python 3.3, path is a _NamespacePath(), which
+ # causes problems if not converted to list.
+ path = list(path)
+ if len(path) == 1:
+ pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
@@ -95,12 +96,13 @@
finally:
self.session = sess
else:
- state.trace("matched test file (was specified on cmdline): %r" % (fn,))
+ state.trace("matched test file (was specified on cmdline): %r" %
+ (fn,))
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
- # concurrent py.test processes rewriting and loading pycs. To avoid
+ # concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
@@ -116,19 +118,19 @@
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
- elif e == PATH_COMPONENT_NOT_DIR:
+ elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e == errno.EACCES:
- state.trace("read only directory: %r" % (fn_pypath.dirname,))
+ state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
- # Notice that even if we're in a read-only directory, I'm going to check
- # for a cached pyc. This may not be optimal...
+ # Notice that even if we're in a read-only directory, I'm going
+ # to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
state.trace("rewriting %r" % (fn,))
@@ -153,27 +155,59 @@
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
+ mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
del sys.modules[name]
raise
return sys.modules[name]
-def _write_pyc(co, source_path, pyc):
- # Technically, we don't have to have the same pyc format as (C)Python, since
- # these "pycs" should never be seen by builtin import. However, there's
- # little reason deviate, and I hope sometime to be able to use
- # imp.load_compiled to load them. (See the comment in load_module above.)
+
+
+ def is_package(self, name):
+ try:
+ fd, fn, desc = imp.find_module(name)
+ except ImportError:
+ return False
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ return tp == imp.PKG_DIRECTORY
+
+ @classmethod
+ def _register_with_pkg_resources(cls):
+ """
+ Ensure package resources can be loaded from this loader. May be called
+ multiple times, as the operation is idempotent.
+ """
+ try:
+ import pkg_resources
+ # access an attribute in case a deferred importer is present
+ pkg_resources.__name__
+ except ImportError:
+ return
+
+ # Since pytest tests are always located in the file system, the
+ # DefaultProvider is appropriate.
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+
+def _write_pyc(state, co, source_path, pyc):
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+ # import. However, there's little reason to deviate, and I hope
+ # sometime to be able to use imp.load_compiled to load them. (See
+ # the comment in load_module above.)
mtime = int(source_path.mtime())
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
- if err == PATH_COMPONENT_NOT_DIR:
- # This happens when we get a EEXIST in find_module creating the
- # __pycache__ directory and __pycache__ is by some non-dir node.
- return False
- raise
+ state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, __pycache__ being a
+ # file etc.
+ return False
try:
fp.write(imp.get_magic())
fp.write(struct.pack(">",
- ast.Add : "+",
- ast.Sub : "-",
- ast.Mult : "*",
- ast.Div : "/",
- ast.FloorDiv : "//",
- ast.Mod : "%",
- ast.Eq : "==",
- ast.NotEq : "!=",
- ast.Lt : "<",
- ast.LtE : "<=",
- ast.Gt : ">",
- ast.GtE : ">=",
- ast.Pow : "**",
- ast.Is : "is",
- ast.IsNot : "is not",
- ast.In : "in",
- ast.NotIn : "not in"
+ ast.BitOr: "|",
+ ast.BitXor: "^",
+ ast.BitAnd: "&",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.FloorDiv: "//",
+ ast.Mod: "%%", # escaped for string formatting
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.Lt: "<",
+ ast.LtE: "<=",
+ ast.Gt: ">",
+ ast.GtE: ">=",
+ ast.Pow: "**",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in"
}
@@ -341,7 +408,7 @@
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
- isinstance(item.value, ast.Str)):
+ isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
@@ -462,7 +529,8 @@
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
- variables = [ast.Name(name, ast.Store()) for name in self.variables]
+ variables = [ast.Name(name, ast.Store())
+ for name in self.variables]
clear = ast.Assign(variables, ast.Name("None", ast.Load()))
self.statements.append(clear)
# Fix line numbers.
@@ -471,11 +539,12 @@
return self.statements
def visit_Name(self, name):
- # Check if the name is local or not.
+ # Display the repr of the name if it's a local variable or
+ # _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [], None, None)
- globs = ast.Call(self.builtin("globals"), [], [], None, None)
- ops = [ast.In(), ast.IsNot()]
- test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+ inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+ dorepr = self.helper("should_repr_global_name", name)
+ test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
@@ -492,7 +561,8 @@
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
- self.on_failure.append(ast.If(cond, fail_inner, []))
+ # cond is set in a prior loop iteration below
+ self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
@@ -548,7 +618,8 @@
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+ new_call = ast.Call(new_func, new_args, new_kwargs,
+ new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
@@ -584,7 +655,7 @@
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
- # Use py.code._reprcompare if that's available.
+ # Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -1,8 +1,13 @@
"""Utilities for assertion debugging"""
import py
+try:
+ from collections import Sequence
+except ImportError:
+ Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
@@ -10,6 +15,7 @@
# DebugInterpreter.
_reprcompare = None
+
def format_explanation(explanation):
"""This formats an explanation
@@ -20,7 +26,18 @@
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
- # simplify 'assert False where False = ...'
+ explanation = _collapse_false(explanation)
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+ """Collapse expansions of False
+
+ So this strips out any "assert False\n{where False = ...\n}"
+ blocks.
+ """
where = 0
while True:
start = where = explanation.find("False\n{False = ", where)
@@ -42,28 +59,48 @@
explanation = (explanation[:start] + explanation[start+15:end-1] +
explanation[end+1:])
where -= 17
- raw_lines = (explanation or '').split('\n')
- # escape newlines not followed by {, } and ~
+ return explanation
+
+
+def _split_explanation(explanation):
+ """Return a list of individual lines in the explanation
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
+ return lines
+
+def _format_lines(lines):
+ """Format the individual lines
+
+ This will replace the '{', '}' and '~' characters of our mini
+ formatting language with the proper 'where ...', 'and ...' and ' +
+ ...' text, taking care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
- s = 'and '
+ s = u('and ')
else:
- s = 'where '
+ s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
- result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
@@ -71,9 +108,9 @@
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
- result.append(' '*len(stack) + line[1:])
+ result.append(u(' ')*len(stack) + line[1:])
assert len(stack) == 1
- return '\n'.join(result)
+ return result
# Provide basestring in python3
@@ -83,132 +120,163 @@
basestring = str
-def assertrepr_compare(op, left, right):
- """return specialised explanations for some operators/operands"""
- width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+def assertrepr_compare(config, op, left, right):
+ """Return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width/2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
- summary = '%s %s %s' % (left_repr, op, right_repr)
+ summary = u('%s %s %s') % (left_repr, op, right_repr)
- issequence = lambda x: isinstance(x, (list, tuple))
+ issequence = lambda x: (isinstance(x, (list, tuple, Sequence))
+ and not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
- isset = lambda x: isinstance(x, set)
+ isset = lambda x: isinstance(x, (set, frozenset))
+ verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
- explanation = _diff_text(left, right)
+ explanation = _diff_text(left, right, verbose)
elif issequence(left) and issequence(right):
- explanation = _compare_eq_sequence(left, right)
+ explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
- explanation = _compare_eq_set(left, right)
+ explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
- explanation = _diff_text(py.std.pprint.pformat(left),
- py.std.pprint.pformat(right))
+ explanation = _compare_eq_dict(left, right, verbose)
elif op == 'not in':
if istext(left) and istext(right):
- explanation = _notin_text(left, right)
- except py.builtin._sysex:
- raise
- except:
+ explanation = _notin_text(left, right, verbose)
+ except Exception:
excinfo = py.code.ExceptionInfo()
- explanation = ['(pytest_assertion plugin: representation of '
- 'details failed. Probably an object has a faulty __repr__.)',
- str(excinfo)
- ]
-
+ explanation = [
+ u('(pytest_assertion plugin: representation of details failed. '
+ 'Probably an object has a faulty __repr__.)'),
+ u(excinfo)]
if not explanation:
return None
- # Don't include pageloads of data, should be configurable
- if len(''.join(explanation)) > 80*8:
- explanation = ['Detailed information too verbose, truncated']
-
return [summary] + explanation
-def _diff_text(left, right):
- """Return the explanation for the diff between text
+def _diff_text(left, right, verbose=False):
+ """Return the explanation for the diff between text or bytes
- This will skip leading and trailing characters which are
- identical to keep the diff minimal.
+ Unless --verbose is used this will skip leading and trailing
+ characters which are identical to keep the diff minimal.
+
+ If the input are bytes they will be safely converted to text.
"""
explanation = []
- i = 0 # just in case left or right has zero length
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation = ['Skipping %s identical '
- 'leading characters in diff' % i]
- left = left[i:]
- right = right[i:]
- if len(left) == len(right):
- for i in range(len(left)):
- if left[-i] != right[-i]:
+ if isinstance(left, py.builtin.bytes):
+ left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+ if isinstance(right, py.builtin.bytes):
+ right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+ if not verbose:
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
break
if i > 42:
- i -= 10 # Provide some context
- explanation += ['Skipping %s identical '
- 'trailing characters in diff' % i]
- left = left[:-i]
- right = right[:-i]
+ i -= 10 # Provide some context
+ explanation = [u('Skipping %s identical leading '
+ 'characters in diff, use -v to show') % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += [u('Skipping %s identical trailing '
+ 'characters in diff, use -v to show') % i]
+ left = left[:-i]
+ right = right[:-i]
explanation += [line.strip('\n')
for line in py.std.difflib.ndiff(left.splitlines(),
right.splitlines())]
return explanation
-def _compare_eq_sequence(left, right):
+def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
- explanation += ['At index %s diff: %r != %r' %
- (i, left[i], right[i])]
+ explanation += [u('At index %s diff: %r != %r')
+ % (i, left[i], right[i])]
break
if len(left) > len(right):
- explanation += ['Left contains more items, '
- 'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+ explanation += [u('Left contains more items, first extra item: %s')
+ % py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
- explanation += ['Right contains more items, '
- 'first extra item: %s' % py.io.saferepr(right[len(left)],)]
- return explanation # + _diff_text(py.std.pprint.pformat(left),
- # py.std.pprint.pformat(right))
+ explanation += [
+ u('Right contains more items, first extra item: %s') %
+ py.io.saferepr(right[len(left)],)]
+ return explanation # + _diff_text(py.std.pprint.pformat(left),
+ # py.std.pprint.pformat(right))
-def _compare_eq_set(left, right):
+def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
- explanation.append('Extra items in the left set:')
+ explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
- explanation.append('Extra items in the right set:')
+ explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
-def _notin_text(term, text):
+def _compare_eq_dict(left, right, verbose=False):
+ explanation = []
+ common = set(left).intersection(set(right))
+ same = dict((k, left[k]) for k in common if left[k] == right[k])
+ if same and not verbose:
+ explanation += [u('Omitting %s identical items, use -v to show') %
+ len(same)]
+ elif same:
+ explanation += [u('Common items:')]
+ explanation += py.std.pprint.pformat(same).splitlines()
+ diff = set(k for k in common if left[k] != right[k])
+ if diff:
+ explanation += [u('Differing items:')]
+ for k in diff:
+ explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+ py.io.saferepr({k: right[k]})]
+ extra_left = set(left) - set(right)
+ if extra_left:
+ explanation.append(u('Left contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, left[k]) for k in extra_left)).splitlines())
+ extra_right = set(right) - set(left)
+ if extra_right:
+ explanation.append(u('Right contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, right[k]) for k in extra_right)).splitlines())
+ return explanation
+
+
+def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
- diff = _diff_text(correct_text, text)
- newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+ diff = _diff_text(correct_text, text, verbose)
+ newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
- if line.startswith('Skipping'):
+ if line.startswith(u('Skipping')):
continue
- if line.startswith('- '):
+ if line.startswith(u('- ')):
continue
- if line.startswith('+ '):
- newdiff.append(' ' + line[2:])
+ if line.startswith(u('+ ')):
+ newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -1,43 +1,114 @@
-""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """
+"""
+ per-test stdout/stderr capturing mechanisms,
+ ``capsys`` and ``capfd`` function arguments.
+"""
+# note: py.io capture was copied from
+# pylib 1.4.20.dev2 (rev 13d9af95547e)
+import sys
+import os
+import tempfile
-import pytest, py
-import os
+import py
+import pytest
+
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ class BytesIO(StringIO):
+ def write(self, data):
+ if isinstance(data, unicode):
+ raise TypeError("not a byte value: %r" % (data,))
+ StringIO.write(self, data)
+
+if sys.version_info < (3, 0):
+ class TextIO(StringIO):
+ def write(self, data):
+ if not isinstance(data, unicode):
+ enc = getattr(self, '_encoding', 'UTF-8')
+ data = unicode(data, enc, 'replace')
+ StringIO.write(self, data)
+else:
+ TextIO = StringIO
+
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
def pytest_addoption(parser):
group = parser.getgroup("general")
- group._addoption('--capture', action="store", default=None,
- metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+ group._addoption(
+ '--capture', action="store", default=None,
+ metavar="method", choices=['fd', 'sys', 'no'],
help="per-test capturing method: one of fd (default)|sys|no.")
- group._addoption('-s', action="store_const", const="no", dest="capture",
+ group._addoption(
+ '-s', action="store_const", const="no", dest="capture",
help="shortcut for --capture=no.")
+
@pytest.mark.tryfirst
-def pytest_cmdline_parse(pluginmanager, args):
- # we want to perform capturing already for plugin/conftest loading
- if '-s' in args or "--capture=no" in args:
- method = "no"
- elif hasattr(os, 'dup') and '--capture=sys' not in args:
+def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
+ ns = parser.parse_known_args(args)
+ method = ns.capture
+ if not method:
method = "fd"
- else:
+ if method == "fd" and not hasattr(os, "dup"):
method = "sys"
capman = CaptureManager(method)
- pluginmanager.register(capman, "capturemanager")
+ early_config.pluginmanager.register(capman, "capturemanager")
+
+ # make sure that capturemanager is properly reset at final shutdown
+ def teardown():
+ try:
+ capman.reset_capturings()
+ except ValueError:
+ pass
+
+ early_config.pluginmanager.add_shutdown(teardown)
+
+ # make sure logging does not raise exceptions at the end
+ def silence_logging_at_shutdown():
+ if "logging" in sys.modules:
+ sys.modules["logging"].raiseExceptions = False
+ early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown)
+
+ # finally trigger conftest loading but while capturing (issue93)
+ capman.resumecapture()
+ try:
+ try:
+ return __multicall__.execute()
+ finally:
+ out, err = capman.suspendcapture()
+ except:
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+ raise
+
def addouterr(rep, outerr):
for secname, content in zip(["out", "err"], outerr):
if content:
rep.sections.append(("Captured std%s" % secname, content))
+
class NoCapture:
def startall(self):
pass
+
def resume(self):
pass
+
def reset(self):
pass
+
def suspend(self):
return "", ""
+
class CaptureManager:
def __init__(self, defaultmethod=None):
self._method2capture = {}
@@ -45,21 +116,23 @@
def _maketempfile(self):
f = py.std.tempfile.TemporaryFile()
- newf = py.io.dupfile(f, encoding="UTF-8")
+ newf = dupfile(f, encoding="UTF-8")
f.close()
return newf
def _makestringio(self):
- return py.io.TextIO()
+ return TextIO()
def _getcapture(self, method):
if method == "fd":
- return py.io.StdCaptureFD(now=False,
- out=self._maketempfile(), err=self._maketempfile()
+ return StdCaptureFD(
+ out=self._maketempfile(),
+ err=self._maketempfile(),
)
elif method == "sys":
- return py.io.StdCapture(now=False,
- out=self._makestringio(), err=self._makestringio()
+ return StdCapture(
+ out=self._makestringio(),
+ err=self._makestringio(),
)
elif method == "no":
return NoCapture()
@@ -74,23 +147,24 @@
method = config._conftest.rget("option_capture", path=fspath)
except KeyError:
method = "fd"
- if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
+ if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
method = "sys"
return method
def reset_capturings(self):
- for name, cap in self._method2capture.items():
+ for cap in self._method2capture.values():
cap.reset()
def resumecapture_item(self, item):
method = self._getmethod(item.config, item.fspath)
if not hasattr(item, 'outerr'):
- item.outerr = ('', '') # we accumulate outerr on the item
+ item.outerr = ('', '') # we accumulate outerr on the item
return self.resumecapture(method)
def resumecapture(self, method=None):
if hasattr(self, '_capturing'):
- raise ValueError("cannot resume, already capturing with %r" %
+ raise ValueError(
+ "cannot resume, already capturing with %r" %
(self._capturing,))
if method is None:
method = self._defaultmethod
@@ -119,30 +193,29 @@
return "", ""
def activate_funcargs(self, pyfuncitem):
- if not hasattr(pyfuncitem, 'funcargs'):
- return
- assert not hasattr(self, '_capturing_funcargs')
- self._capturing_funcargs = capturing_funcargs = []
- for name, capfuncarg in pyfuncitem.funcargs.items():
- if name in ('capsys', 'capfd'):
- capturing_funcargs.append(capfuncarg)
- capfuncarg._start()
+ funcargs = getattr(pyfuncitem, "funcargs", None)
+ if funcargs is not None:
+ for name, capfuncarg in funcargs.items():
+ if name in ('capsys', 'capfd'):
+ assert not hasattr(self, '_capturing_funcarg')
+ self._capturing_funcarg = capfuncarg
+ capfuncarg._start()
def deactivate_funcargs(self):
- capturing_funcargs = getattr(self, '_capturing_funcargs', None)
- if capturing_funcargs is not None:
- while capturing_funcargs:
- capfuncarg = capturing_funcargs.pop()
- capfuncarg._finalize()
- del self._capturing_funcargs
+ capturing_funcarg = getattr(self, '_capturing_funcarg', None)
+ if capturing_funcarg:
+ outerr = capturing_funcarg._finalize()
+ del self._capturing_funcarg
+ return outerr
def pytest_make_collect_report(self, __multicall__, collector):
method = self._getmethod(collector.config, collector.fspath)
try:
self.resumecapture(method)
except ValueError:
- return # recursive collect, XXX refactor capturing
- # to allow for more lightweight recursive capturing
+ # recursive collect, XXX refactor capturing
+ # to allow for more lightweight recursive capturing
+ return
try:
rep = __multicall__.execute()
finally:
@@ -169,46 +242,371 @@
@pytest.mark.tryfirst
def pytest_runtest_makereport(self, __multicall__, item, call):
- self.deactivate_funcargs()
+ funcarg_outerr = self.deactivate_funcargs()
rep = __multicall__.execute()
outerr = self.suspendcapture(item)
- if not rep.passed:
- addouterr(rep, outerr)
+ if funcarg_outerr is not None:
+ outerr = (outerr[0] + funcarg_outerr[0],
+ outerr[1] + funcarg_outerr[1])
+ addouterr(rep, outerr)
if not rep.passed or rep.when == "teardown":
outerr = ('', '')
item.outerr = outerr
return rep
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
def pytest_funcarg__capsys(request):
"""enables capturing of writes to sys.stdout/sys.stderr and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
- return CaptureFuncarg(py.io.StdCapture)
+ if "capfd" in request._funcargs:
+ raise request.raiseerror(error_capsysfderror)
+ return CaptureFixture(StdCapture)
+
def pytest_funcarg__capfd(request):
"""enables capturing of writes to file descriptors 1 and 2 and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
+ if "capsys" in request._funcargs:
+ request.raiseerror(error_capsysfderror)
if not hasattr(os, 'dup'):
- py.test.skip("capfd funcarg needs os.dup")
- return CaptureFuncarg(py.io.StdCaptureFD)
+ pytest.skip("capfd funcarg needs os.dup")
+ return CaptureFixture(StdCaptureFD)
-class CaptureFuncarg:
+
+class CaptureFixture:
def __init__(self, captureclass):
- self.capture = captureclass(now=False)
+ self._capture = captureclass()
def _start(self):
- self.capture.startall()
+ self._capture.startall()
def _finalize(self):
- if hasattr(self, 'capture'):
- self.capture.reset()
- del self.capture
+ if hasattr(self, '_capture'):
+ outerr = self._outerr = self._capture.reset()
+ del self._capture
+ return outerr
def readouterr(self):
- return self.capture.readouterr()
+ try:
+ return self._capture.readouterr()
+ except AttributeError:
+ return self._outerr
def close(self):
self._finalize()
+
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None, patchsys=False):
+ """ save targetfd descriptor, and open a new
+ temporary file there. If no tmpfile is
+ specified a tempfile.Tempfile() will be opened
+ in text mode.
+ """
+ self.targetfd = targetfd
+ if tmpfile is None and targetfd != 0:
+ f = tempfile.TemporaryFile('wb+')
+ tmpfile = dupfile(f, encoding="UTF-8")
+ f.close()
+ self.tmpfile = tmpfile
+ self._savefd = os.dup(self.targetfd)
+ if patchsys:
+ self._oldsys = getattr(sys, patchsysdict[targetfd])
+
+ def start(self):
+ try:
+ os.fstat(self._savefd)
+ except OSError:
+ raise ValueError(
+ "saved filedescriptor not valid, "
+ "did you call start() twice?")
+ if self.targetfd == 0 and not self.tmpfile:
+ fd = os.open(os.devnull, os.O_RDONLY)
+ os.dup2(fd, 0)
+ os.close(fd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+ else:
+ os.dup2(self.tmpfile.fileno(), self.targetfd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+ def done(self):
+ """ unpatch and clean up, returns the self.tmpfile (file object)
+ """
+ os.dup2(self._savefd, self.targetfd)
+ os.close(self._savefd)
+ if self.targetfd != 0:
+ self.tmpfile.seek(0)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+ return self.tmpfile
+
+ def writeorg(self, data):
+ """ write a string to the original file descriptor
+ """
+ tempfp = tempfile.TemporaryFile()
+ try:
+ os.dup2(self._savefd, tempfp.fileno())
+ tempfp.write(data)
+ finally:
+ tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+ """ return a new open file object that's a duplicate of f
+
+ mode is duplicated if not given, 'buffering' controls
+ buffer size (defaulting to no buffering) and 'raising'
+ defines whether an exception is raised when an incompatible
+ file object is passed in (if raising is False, the file
+ object itself will be returned)
+ """
+ try:
+ fd = f.fileno()
+ mode = mode or f.mode
+ except AttributeError:
+ if raising:
+ raise
+ return f
+ newfd = os.dup(fd)
+ if sys.version_info >= (3, 0):
+ if encoding is not None:
+ mode = mode.replace("b", "")
+ buffering = True
+ return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+ else:
+ f = os.fdopen(newfd, mode, buffering)
+ if encoding is not None:
+ return EncodedFile(f, encoding)
+ return f
+
+
+class EncodedFile(object):
+ def __init__(self, _stream, encoding):
+ self._stream = _stream
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding)
+ self._stream.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+
+class Capture(object):
+ def reset(self):
+ """ reset sys.stdout/stderr and return captured output as strings. """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already reset")
+ self._reset = True
+ outfile, errfile = self.done(save=False)
+ out, err = "", ""
+ if outfile and not outfile.closed:
+ out = outfile.read()
+ outfile.close()
+ if errfile and errfile != outfile and not errfile.closed:
+ err = errfile.read()
+ errfile.close()
+ return out, err
+
+ def suspend(self):
+ """ return current snapshot captures, memorize tempfiles. """
+ outerr = self.readouterr()
+ outfile, errfile = self.done()
+ return outerr
+
+
+class StdCaptureFD(Capture):
+ """ This class allows to capture writes to FD1 and FD2
+ and may connect a NULL file to FD0 (and prevent
+ reads from sys.stdin). If any of the 0,1,2 file descriptors
+ is invalid it will not be captured.
+ """
+ def __init__(self, out=True, err=True, in_=True, patchsys=True):
+ self._options = {
+ "out": out,
+ "err": err,
+ "in_": in_,
+ "patchsys": patchsys,
+ }
+ self._save()
+
+ def _save(self):
+ in_ = self._options['in_']
+ out = self._options['out']
+ err = self._options['err']
+ patchsys = self._options['patchsys']
+ if in_:
+ try:
+ self.in_ = FDCapture(
+ 0, tmpfile=None,
+ patchsys=patchsys)
+ except OSError:
+ pass
+ if out:
+ tmpfile = None
+ if hasattr(out, 'write'):
+ tmpfile = out
+ try:
+ self.out = FDCapture(
+ 1, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['out'] = self.out.tmpfile
+ except OSError:
+ pass
+ if err:
+ if hasattr(err, 'write'):
+ tmpfile = err
+ else:
+ tmpfile = None
+ try:
+ self.err = FDCapture(
+ 2, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['err'] = self.err.tmpfile
+ except OSError:
+ pass
+
+ def startall(self):
+ if hasattr(self, 'in_'):
+ self.in_.start()
+ if hasattr(self, 'out'):
+ self.out.start()
+ if hasattr(self, 'err'):
+ self.err.start()
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if hasattr(self, 'out') and not self.out.tmpfile.closed:
+ outfile = self.out.done()
+ if hasattr(self, 'err') and not self.err.tmpfile.closed:
+ errfile = self.err.done()
+ if hasattr(self, 'in_'):
+ self.in_.done()
+ if save:
+ self._save()
+ return outfile, errfile
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = self._readsnapshot('out')
+ err = self._readsnapshot('err')
+ return out, err
+
+ def _readsnapshot(self, name):
+ if hasattr(self, name):
+ f = getattr(self, name).tmpfile
+ else:
+ return ''
+
+ f.seek(0)
+ res = f.read()
+ enc = getattr(f, "encoding", None)
+ if enc:
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+
+class StdCapture(Capture):
+ """ This class allows to capture writes to sys.stdout|stderr "in-memory"
+ and will raise errors on tries to read from sys.stdin. It only
+ modifies sys.stdout|stderr|stdin attributes and does not
+ touch underlying File Descriptors (use StdCaptureFD for that).
+ """
+ def __init__(self, out=True, err=True, in_=True):
+ self._oldout = sys.stdout
+ self._olderr = sys.stderr
+ self._oldin = sys.stdin
+ if out and not hasattr(out, 'file'):
+ out = TextIO()
+ self.out = out
+ if err:
+ if not hasattr(err, 'write'):
+ err = TextIO()
+ self.err = err
+ self.in_ = in_
+
+ def startall(self):
+ if self.out:
+ sys.stdout = self.out
+ if self.err:
+ sys.stderr = self.err
+ if self.in_:
+ sys.stdin = self.in_ = DontReadFromInput()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if self.out and not self.out.closed:
+ sys.stdout = self._oldout
+ outfile = self.out
+ outfile.seek(0)
+ if self.err and not self.err.closed:
+ sys.stderr = self._olderr
+ errfile = self.err
+ errfile.seek(0)
+ if self.in_:
+ sys.stdin = self._oldin
+ return outfile, errfile
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = err = ""
+ if self.out:
+ out = self.out.getvalue()
+ self.out.truncate(0)
+ self.out.seek(0)
+ if self.err:
+ err = self.err.getvalue()
+ self.err.truncate(0)
+ self.err.seek(0)
+ return out, err
+
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+
+ def isatty(self):
+ return False
+
+ def close(self):
+ pass
diff --git a/_pytest/config.py b/_pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -1,25 +1,91 @@
""" command line options, ini-file and conftest.py processing. """
import py
+# DON't import pytest here because it causes import cycle troubles
import sys, os
+from _pytest import hookspec # the extension point definitions
from _pytest.core import PluginManager
-import pytest
-def pytest_cmdline_parse(pluginmanager, args):
- config = Config(pluginmanager)
- config.parse(args)
- return config
+# pytest startup
-def pytest_unconfigure(config):
- while 1:
- try:
- fin = config._cleanup.pop()
- except IndexError:
- break
- fin()
+def main(args=None, plugins=None):
+ """ return exit code, after performing an in-process test run.
+
+ :arg args: list of command line arguments.
+
+ :arg plugins: list of plugin objects to be auto-registered during
+ initialization.
+ """
+ config = _prepareconfig(args, plugins)
+ return config.hook.pytest_cmdline_main(config=config)
+
+class cmdline: # compatibility namespace
+ main = staticmethod(main)
+
+class UsageError(Exception):
+ """ error in pytest usage or invocation"""
+
+_preinit = []
+
+default_plugins = (
+ "mark main terminal runner python pdb unittest capture skipping "
+ "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
+ "junitxml resultlog doctest").split()
+
+def _preloadplugins():
+ assert not _preinit
+ _preinit.append(get_plugin_manager())
+
+def get_plugin_manager():
+ if _preinit:
+ return _preinit.pop(0)
+ # subsequent calls to main will create a fresh instance
+ pluginmanager = PytestPluginManager()
+ pluginmanager.config = Config(pluginmanager) # XXX attr needed?
+ for spec in default_plugins:
+ pluginmanager.import_plugin(spec)
+ return pluginmanager
+
+def _prepareconfig(args=None, plugins=None):
+ if args is None:
+ args = sys.argv[1:]
+ elif isinstance(args, py.path.local):
+ args = [str(args)]
+ elif not isinstance(args, (tuple, list)):
+ if not isinstance(args, str):
+ raise ValueError("not a string or argument list: %r" % (args,))
+ args = py.std.shlex.split(args)
+ pluginmanager = get_plugin_manager()
+ if plugins:
+ for plugin in plugins:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+
+class PytestPluginManager(PluginManager):
+ def __init__(self, hookspecs=[hookspec]):
+ super(PytestPluginManager, self).__init__(hookspecs=hookspecs)
+ self.register(self)
+ if os.environ.get('PYTEST_DEBUG'):
+ err = sys.stderr
+ encoding = getattr(err, 'encoding', 'utf8')
+ try:
+ err = py.io.dupfile(err, encoding=encoding)
+ except Exception:
+ pass
+ self.trace.root.setwriter(err.write)
+
+ def pytest_configure(self, config):
+ config.addinivalue_line("markers",
+ "tryfirst: mark a hook implementation function such that the "
+ "plugin machinery will try to call it first/as early as possible.")
+ config.addinivalue_line("markers",
+ "trylast: mark a hook implementation function such that the "
+ "plugin machinery will try to call it last/as late as possible.")
+
class Parser:
- """ Parser for command line arguments. """
+ """ Parser for command line arguments and ini-file values. """
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
@@ -35,15 +101,17 @@
if option.dest:
self._processopt(option)
- def addnote(self, note):
- self._notes.append(note)
-
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
- :name: unique name of the option group.
+ :name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
+
+ The returned group object has an ``addoption`` method with the same
+ signature as :py:func:`parser.addoption
+ <_pytest.config.Parser.addoption>` but will be shown in the
+        respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
@@ -57,33 +125,222 @@
return group
def addoption(self, *opts, **attrs):
- """ add an optparse-style option. """
+ """ register a command line option.
+
+ :opts: option names, can be short or long options.
+ :attrs: same attributes which the ``add_option()`` function of the
+ `argparse library
+ `_
+ accepts.
+
+ After command line parsing options are available on the pytest config
+ object via ``config.option.NAME`` where ``NAME`` is usually set
+ by passing a ``dest`` attribute, for example
+ ``addoption("--long", dest="NAME", ...)``.
+ """
self._anonymous.addoption(*opts, **attrs)
def parse(self, args):
- self.optparser = optparser = MyOptionParser(self)
+ from _pytest._argcomplete import try_argcomplete
+ self.optparser = self._getparser()
+ try_argcomplete(self.optparser)
+ return self.optparser.parse_args([str(x) for x in args])
+
+ def _getparser(self):
+ from _pytest._argcomplete import filescompleter
+ optparser = MyOptionParser(self)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
- optgroup = py.std.optparse.OptionGroup(optparser, desc)
- optgroup.add_options(group.options)
- optparser.add_option_group(optgroup)
- return self.optparser.parse_args([str(x) for x in args])
+ arggroup = optparser.add_argument_group(desc)
+ for option in group.options:
+ n = option.names()
+ a = option.attrs()
+ arggroup.add_argument(*n, **a)
+ # bash like autocompletion for dirs (appending '/')
+ optparser.add_argument(FILE_OR_DIR, nargs='*'
+ ).completer=filescompleter
+ return optparser
def parse_setoption(self, args, option):
- parsedoption, args = self.parse(args)
+ parsedoption = self.parse(args)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
- return args
+ return getattr(parsedoption, FILE_OR_DIR)
+
+ def parse_known_args(self, args):
+ optparser = self._getparser()
+ args = [str(x) for x in args]
+ return optparser.parse_known_args(args)[0]
def addini(self, name, help, type=None, default=None):
- """ add an ini-file option with the given name and description. """
+ """ register an ini-file option.
+
+ :name: name of the ini-variable
+ :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``.
+ :default: default value if no ini-file option exists but is queried.
+
+ The value of ini-variables can be retrieved via a call to
+ :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
+ """
assert type in (None, "pathlist", "args", "linelist")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
+class ArgumentError(Exception):
+ """
+ Raised if an Argument instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+
+class Argument:
+ """class that mimics the necessary behaviour of py.std.optparse.Option """
+ _typ_map = {
+ 'int': int,
+ 'string': str,
+ }
+ # enable after some grace period for plugin writers
+ TYPE_WARN = False
+
+ def __init__(self, *names, **attrs):
+ """store parms in private vars for use in add_argument"""
+ self._attrs = attrs
+ self._short_opts = []
+ self._long_opts = []
+ self.dest = attrs.get('dest')
+ if self.TYPE_WARN:
+ try:
+ help = attrs['help']
+ if '%default' in help:
+ py.std.warnings.warn(
+ 'pytest now uses argparse. "%default" should be'
+ ' changed to "%(default)s" ',
+ FutureWarning,
+ stacklevel=3)
+ except KeyError:
+ pass
+ try:
+ typ = attrs['type']
+ except KeyError:
+ pass
+ else:
+ # this might raise a keyerror as well, don't want to catch that
+ if isinstance(typ, py.builtin._basestring):
+ if typ == 'choice':
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this is optional and when supplied '
+ ' should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ # argparse expects a type here take it from
+ # the type of the first element
+ attrs['type'] = type(attrs['choices'][0])
+ else:
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ attrs['type'] = Argument._typ_map[typ]
+ # used in test_parseopt -> test_parse_defaultgetter
+ self.type = attrs['type']
+ else:
+ self.type = typ
+ try:
+ # attribute existence is tested in Config._processopt
+ self.default = attrs['default']
+ except KeyError:
+ pass
+ self._set_opt_strings(names)
+ if not self.dest:
+ if self._long_opts:
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ try:
+ self.dest = self._short_opts[0][1:]
+ except IndexError:
+ raise ArgumentError(
+ 'need a long or short option', self)
+
+ def names(self):
+ return self._short_opts + self._long_opts
+
+ def attrs(self):
+ # update any attributes set by processopt
+ attrs = 'default dest help'.split()
+ if self.dest:
+ attrs.append(self.dest)
+ for attr in attrs:
+ try:
+ self._attrs[attr] = getattr(self, attr)
+ except AttributeError:
+ pass
+ if self._attrs.get('help'):
+ a = self._attrs['help']
+ a = a.replace('%default', '%(default)s')
+ #a = a.replace('%prog', '%(prog)s')
+ self._attrs['help'] = a
+ return self._attrs
+
+ def _set_opt_strings(self, opts):
+ """directly from optparse
+
+ might not be necessary as this is passed to argparse later on"""
+ for opt in opts:
+ if len(opt) < 2:
+ raise ArgumentError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise ArgumentError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise ArgumentError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def __repr__(self):
+ retval = 'Argument('
+ if self._short_opts:
+ retval += '_short_opts: ' + repr(self._short_opts) + ', '
+ if self._long_opts:
+ retval += '_long_opts: ' + repr(self._long_opts) + ', '
+ retval += 'dest: ' + repr(self.dest) + ', '
+ if hasattr(self, 'type'):
+ retval += 'type: ' + repr(self.type) + ', '
+ if hasattr(self, 'default'):
+ retval += 'default: ' + repr(self.default) + ', '
+ if retval[-2:] == ', ': # always long enough to test ("Argument(" )
+ retval = retval[:-2]
+ retval += ')'
+ return retval
+
+
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
@@ -92,12 +349,18 @@
self.parser = parser
def addoption(self, *optnames, **attrs):
- """ add an option to this group. """
- option = py.std.optparse.Option(*optnames, **attrs)
+ """ add an option to this group.
+
+ if a shortened version of a long option is specified it will
+ be suppressed in the help. addoption('--twowords', '--two-words')
+ results in help showing '--two-words' only, but --twowords gets
+ accepted **and** the automatic destination is in args.twowords
+ """
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
- option = py.std.optparse.Option(*optnames, **attrs)
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
From noreply at buildbot.pypy.org Sun Aug 17 20:14:06 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 20:14:06 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix the test and the code
for stmrewrite
Message-ID: <20140817181406.C26581C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72853:477a18370cb0
Date: 2014-08-17 20:13 +0200
http://bitbucket.org/pypy/pypy/changeset/477a18370cb0/
Log: Fix the test and the code for stmrewrite
diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py
--- a/rpython/jit/backend/llsupport/stmrewrite.py
+++ b/rpython/jit/backend/llsupport/stmrewrite.py
@@ -23,14 +23,6 @@
self.newops.append(op)
return
# ---------- transaction breaks ----------
- if opnum == rop.STM_SHOULD_BREAK_TRANSACTION:
- self.handle_should_break_transaction(op)
- return
- if opnum == rop.STM_TRANSACTION_BREAK:
- self.emitting_an_operation_that_can_collect()
- self.next_op_may_be_in_new_transaction()
- self.newops.append(op)
- return
if opnum == rop.STM_HINT_COMMIT_SOON:
self._do_stm_call('stm_hint_commit_soon', [], None,
op.stm_location)
@@ -84,14 +76,19 @@
self.next_op_may_be_in_new_transaction()
self.newops.append(op)
return
- # ---------- jumps, finish, other ignored ops ----------
- if opnum in (rop.JUMP, rop.FINISH, rop.FORCE_TOKEN,
+ # ---------- other ignored ops ----------
+ if opnum in (rop.STM_SHOULD_BREAK_TRANSACTION, rop.FORCE_TOKEN,
rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR,
rop.JIT_DEBUG, rop.KEEPALIVE,
rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS,
):
self.newops.append(op)
return
+ # ---------- jump, finish ----------
+ if opnum == rop.JUMP or opnum == rop.FINISH:
+ self.add_dummy_allocation()
+ self.newops.append(op)
+ return
# ---------- fall-back ----------
# Check that none of the ops handled here can collect.
# This is not done by the fallback here
@@ -122,7 +119,7 @@
self.newops.append(op1)
self.read_barrier_applied[v_ptr] = None
- def handle_should_break_transaction(self, op):
+ def add_dummy_allocation(self):
if not self.does_any_allocation:
# do a fake allocation since this is needed to check
# for requested safe-points:
@@ -134,9 +131,6 @@
assert self._op_malloc_nursery is None # no ongoing allocation
self.gen_malloc_nursery(size, v_result)
- self.newops.append(op)
-
-
def must_apply_write_barrier(self, val, v=None):
return val not in self.write_barrier_applied
diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py
--- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py
@@ -65,8 +65,10 @@
def check_rewrite(self, frm_operations, to_operations, **namespace):
inev = ("call(ConstClass(stm_try_inevitable),"
" descr=stm_try_inevitable_descr)")
+ dummyalloc = "p999 = call_malloc_nursery(16)"
frm_operations = frm_operations.replace('$INEV', inev)
to_operations = to_operations .replace('$INEV', inev)
+ to_operations = to_operations .replace('$DUMMYALLOC', dummyalloc)
for name, value in self.gc_ll_descr.__dict__.items():
if name.endswith('descr') and name[1] == '2' and len(name) == 8:
namespace[name] = value # "X2Ydescr"
@@ -94,6 +96,7 @@
[]
%s
call(123, descr=cd)
+ $DUMMYALLOC
jump()
""" % ("$INEV" if inev else "",), cd=calldescr)
@@ -106,6 +109,7 @@
[p1, p2]
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, p2, descr=tzdescr)
+ $DUMMYALLOC
jump()
""")
@@ -118,6 +122,7 @@
[p1, i2]
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, i2, descr=tzdescr)
+ $DUMMYALLOC
jump()
""")
@@ -132,6 +137,7 @@
[p1, p2]
cond_call_gc_wb(ConstPtr(t), descr=wbdescr)
setfield_gc(ConstPtr(t), p2, descr=tzdescr)
+ $DUMMYALLOC
jump()
""", t=NULL)
@@ -144,6 +150,7 @@
[p1]
p2 = getfield_gc(p1, descr=tzdescr)
stm_read(p1)
+ $DUMMYALLOC
jump()
""")
@@ -163,6 +170,7 @@
p5 = getfield_gc(p2, descr=tzdescr)
stm_read(p2)
p6 = getfield_gc(p1, descr=tzdescr)
+ $DUMMYALLOC
jump()
""")
@@ -177,6 +185,7 @@
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, i2, descr=tydescr)
p3 = getfield_gc(p1, descr=tzdescr)
+ $DUMMYALLOC
jump(p3)
""")
@@ -196,6 +205,7 @@
cond_call_gc_wb(p2, descr=wbdescr)
setfield_gc(p2, p0, descr=tzdescr)
p4 = getfield_gc(p1, descr=tzdescr)
+ $DUMMYALLOC
jump()
""", t=NULL)
@@ -274,6 +284,7 @@
setfield_gc(p1, p2, descr=tzdescr)
cond_call_gc_wb(p3, descr=wbdescr)
setfield_gc(p3, p4, descr=tzdescr)
+ $DUMMYALLOC
jump()
""")
@@ -288,6 +299,7 @@
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, p2, descr=tzdescr)
setfield_gc(p1, i3, descr=tydescr)
+ $DUMMYALLOC
jump()
""")
@@ -305,6 +317,7 @@
label(p1, i3)
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, i3, descr=tydescr)
+ $DUMMYALLOC
jump(p1)
""")
@@ -317,6 +330,7 @@
""", """
[i1, i2]
+ $DUMMYALLOC
jump()
""")
@@ -337,9 +351,10 @@
testcase = """
[i1, i2, p1, p2, f1]
%s
+ $DUMMYALLOC
finish()
""" % op
- self.check_rewrite(testcase, testcase)
+ self.check_rewrite(testcase.replace('$DUMMYALLOC', ''), testcase)
def test_rewrite_getfield_gc_const(self):
TP = lltype.GcArray(lltype.Signed)
@@ -352,9 +367,9 @@
[p1]
p2 = getfield_gc(ConstPtr(t), descr=tzdescr)
stm_read(ConstPtr(t))
+ $DUMMYALLOC
jump(p2)
""", t=NULL)
- # XXX could do better: G2Rdescr
def test_rewrite_getarrayitem_gc(self):
self.check_rewrite("""
@@ -365,6 +380,7 @@
[p1, i2]
i3 = getarrayitem_gc(p1, i2, descr=adescr)
stm_read(p1)
+ $DUMMYALLOC
jump(i3)
""")
@@ -377,6 +393,7 @@
[p1, i2]
i3 = getinteriorfield_gc(p1, i2, descr=intzdescr)
stm_read(p1)
+ $DUMMYALLOC
jump(i3)
""")
@@ -392,6 +409,7 @@
stm_read(p1)
i2 = getfield_gc(p2, descr=tydescr)
stm_read(p2)
+ $DUMMYALLOC
jump(p2, i2)
""")
@@ -411,7 +429,7 @@
i2 = int_add(i1, 1)
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, i2, descr=tydescr)
-
+ $DUMMYALLOC
jump(p1)
""")
@@ -438,6 +456,7 @@
call(p2, descr=calldescr1)
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, 5, descr=tydescr)
+ $DUMMYALLOC
jump(p2)
""", calldescr1=calldescr1)
@@ -454,7 +473,7 @@
i3 = getfield_raw(i1, descr=tydescr)
keepalive(i3)
i4 = getfield_raw(i2, descr=tydescr)
-
+ $DUMMYALLOC
jump(i3, i4)
""")
@@ -470,7 +489,7 @@
""", """
[i1]
i2 = getfield_raw(i1, descr=fdescr)
-
+ $DUMMYALLOC
jump(i2)
""", fdescr=fdescr)
@@ -488,7 +507,7 @@
label(i1, i2, i3)
$INEV
i4 = getfield_raw(i2, descr=tydescr)
-
+ $DUMMYALLOC
jump(i3, i4)
""")
@@ -503,7 +522,7 @@
$INEV
i3 = getarrayitem_raw(i1, 5, descr=adescr)
i4 = getarrayitem_raw(i2, i3, descr=adescr)
-
+ $DUMMYALLOC
jump(i3, i4)
""")
@@ -519,7 +538,7 @@
setarrayitem_gc(p1, i1, p2, descr=adescr)
cond_call_gc_wb_array(p3, i3, descr=wbdescr)
setarrayitem_gc(p3, i3, p4, descr=adescr)
-
+ $DUMMYALLOC
jump()
""")
@@ -537,7 +556,7 @@
i4 = read_timestamp()
cond_call_gc_wb_array(p1, i3, descr=wbdescr)
setarrayitem_gc(p1, i3, p3, descr=adescr)
-
+ $DUMMYALLOC
jump()
""")
@@ -555,7 +574,7 @@
i4 = read_timestamp()
cond_call_gc_wb_array(p1, i3, descr=wbdescr)
setinteriorfield_gc(p1, i3, p3, descr=intzdescr)
-
+ $DUMMYALLOC
jump()
""")
@@ -570,7 +589,7 @@
cond_call_gc_wb(p1, descr=wbdescr)
strsetitem(p1, i2, i3)
unicodesetitem(p1, i2, i3)
-
+ $DUMMYALLOC
jump()
""")
@@ -585,6 +604,7 @@
[i2, i3]
p1 = call_malloc_nursery_varsize(1, 1, i3, descr=strdescr)
setfield_gc(p1, i3, descr=strlendescr)
+ cond_call_gc_wb(p1, descr=wbdescr)
strsetitem(p1, i2, i3)
unicodesetitem(p1, i2, i3)
jump()
@@ -600,6 +620,7 @@
[p1, i2, i3]
i4=strgetitem(p1, i2)
i5=unicodegetitem(p1, i2)
+ $DUMMYALLOC
jump()
""")
@@ -623,6 +644,7 @@
cond_call_gc_wb(p7, descr=wbdescr)
setfield_gc(p7, 20, descr=tydescr)
+ $DUMMYALLOC
jump(i2, p7)
""", calldescr2=calldescr2)
@@ -651,6 +673,7 @@
cond_call_gc_wb(p7, descr=wbdescr)
setfield_gc(p7, 20, descr=tydescr)
+ $DUMMYALLOC
jump(i2, p7)
""" % op, calldescr2=calldescr2)
@@ -664,6 +687,7 @@
[p1, i1, i2, i3]
p2 = call_malloc_nursery_varsize(1, 1, i3, descr=strdescr)
setfield_gc(p2, i3, descr=strlendescr)
+ cond_call_gc_wb(p2, descr=wbdescr)
copystrcontent(p1, p2, i1, i2, i3)
jump()
""")
@@ -677,6 +701,7 @@
[p1, p2, i1, i2, i3]
cond_call_gc_wb(p2, descr=wbdescr)
copystrcontent(p1, p2, i1, i2, i3)
+ $DUMMYALLOC
jump()
""")
@@ -698,7 +723,7 @@
setfield_gc(p1, 10, descr=tydescr)
%s
setfield_gc(p1, 20, descr=tydescr)
-
+ $DUMMYALLOC
jump(p1)
""" % op)
@@ -734,6 +759,7 @@
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, 20, descr=tydescr)
+ $DUMMYALLOC
jump(p1)
""" % (op, guard, tr_break), calldescr2=calldescr2)
@@ -794,24 +820,18 @@
self.check_rewrite("""
[p1, p2]
i1 = ptr_eq(p1, NULL)
- jump(i1)
""", """
[p1, p2]
i1 = ptr_eq(p1, NULL)
-
- jump(i1)
""")
def test_ptr_eq(self):
self.check_rewrite("""
[p1, p2]
i1 = ptr_eq(p1, p2)
- jump(i1)
""", """
[p1, p2]
i1 = ptr_eq(p1, p2)
-
- jump(i1)
""")
def test_instance_ptr_eq(self):
@@ -822,7 +842,7 @@
""", """
[p1, p2]
i1 = instance_ptr_eq(p1, p2)
-
+ $DUMMYALLOC
jump(i1)
""")
@@ -830,24 +850,18 @@
self.check_rewrite("""
[p1, p2]
i1 = ptr_ne(p1, p2)
- jump(i1)
""", """
[p1, p2]
i1 = ptr_ne(p1, p2)
-
- jump(i1)
""")
def test_instance_ptr_ne(self):
self.check_rewrite("""
[p1, p2]
i1 = instance_ptr_ne(p1, p2)
- jump(i1)
""", """
[p1, p2]
i1 = instance_ptr_ne(p1, p2)
-
- jump(i1)
""")
# ----------- tests copied from rewrite.py -------------
@@ -856,10 +870,12 @@
self.check_rewrite("""
[p1]
p0 = new(descr=sdescr)
+ jump()
""", """
[p1]
p0 = call_malloc_nursery(%(sdescr.size)d)
setfield_gc(p0, 1234, descr=tiddescr)
+ jump()
""")
def test_rewrite_assembler_new3_to_malloc(self):
@@ -868,6 +884,7 @@
p0 = new(descr=sdescr)
p1 = new(descr=tdescr)
p2 = new(descr=sdescr)
+ jump()
""", """
[]
p0 = call_malloc_nursery( \
@@ -877,18 +894,21 @@
setfield_gc(p1, 5678, descr=tiddescr)
p2 = int_add(p1, %(tdescr.size)d)
setfield_gc(p2, 1234, descr=tiddescr)
+ jump()
""")
def test_rewrite_assembler_new_array_fixed_to_malloc(self):
self.check_rewrite("""
[]
p0 = new_array(10, descr=adescr)
+ jump()
""", """
[]
p0 = call_malloc_nursery( \
%(adescr.basesize + 10 * adescr.itemsize)d)
setfield_gc(p0, 4321, descr=tiddescr)
setfield_gc(p0, 10, descr=alendescr)
+ jump()
""")
def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self):
@@ -896,6 +916,7 @@
[]
p0 = new(descr=sdescr)
p1 = new_array(10, descr=adescr)
+ jump()
""", """
[]
p0 = call_malloc_nursery( \
@@ -905,17 +926,20 @@
p1 = int_add(p0, %(sdescr.size)d)
setfield_gc(p1, 4321, descr=tiddescr)
setfield_gc(p1, 10, descr=alendescr)
+ jump()
""")
def test_rewrite_assembler_round_up(self):
self.check_rewrite("""
[]
p0 = new_array(6, descr=bdescr)
+ jump()
""", """
[]
p0 = call_malloc_nursery(%(bdescr.basesize + 8)d)
setfield_gc(p0, 8765, descr=tiddescr)
setfield_gc(p0, 6, descr=blendescr)
+ jump()
""")
def test_rewrite_assembler_round_up_always(self):
@@ -925,6 +949,7 @@
p1 = new_array(5, descr=bdescr)
p2 = new_array(5, descr=bdescr)
p3 = new_array(5, descr=bdescr)
+ jump()
""", """
[]
p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d)
@@ -939,6 +964,7 @@
p3 = int_add(p2, %(bdescr.basesize + 8)d)
setfield_gc(p3, 8765, descr=tiddescr)
setfield_gc(p3, 5, descr=blendescr)
+ jump()
""")
def test_rewrite_assembler_minimal_size(self):
@@ -946,12 +972,14 @@
[]
p0 = new(descr=edescr)
p1 = new(descr=edescr)
+ jump()
""", """
[]
p0 = call_malloc_nursery(%(4*WORD)d)
setfield_gc(p0, 9000, descr=tiddescr)
p1 = int_add(p0, %(2*WORD)d)
setfield_gc(p1, 9000, descr=tiddescr)
+ jump()
""")
def test_rewrite_assembler_variable_size(self):
@@ -1086,6 +1114,7 @@
p1 = newunicode(10)
p2 = newunicode(i2)
p3 = newstr(i2)
+ jump()
""", """
[i2]
p0 = call_malloc_nursery( \
@@ -1093,15 +1122,20 @@
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr)
setfield_gc(p0, 14, descr=strlendescr)
+
p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr)
setfield_gc(p1, 10, descr=unicodelendescr)
+
p2 = call_malloc_nursery_varsize(2, 4, i2, \
descr=unicodedescr)
setfield_gc(p2, i2, descr=unicodelendescr)
+
p3 = call_malloc_nursery_varsize(1, 1, i2, \
descr=strdescr)
setfield_gc(p3, i2, descr=strlendescr)
+
+ jump()
""")
def test_label_makes_size_unknown(self):
@@ -1134,7 +1168,8 @@
[i0, f0]
p0 = new_array(5, descr=bdescr)
p1 = new_array(5, descr=bdescr)
- stm_transaction_break(1)
+ call_may_force(12345, descr=calldescr2) # stm_transaction_break
+ guard_not_forced() []
p2 = new_array(5, descr=bdescr)
""", """
[i0, f0]
@@ -1146,7 +1181,8 @@
setfield_gc(p1, 8765, descr=tiddescr)
setfield_gc(p1, 5, descr=blendescr)
- stm_transaction_break(1)
+ call_may_force(12345, descr=calldescr2) # stm_transaction_break
+ guard_not_forced() []
p2 = call_malloc_nursery( \
%(bdescr.basesize + 8)d)
@@ -1188,6 +1224,7 @@
%(comment)s stm_read(p1)
i4 = getarrayitem_gc%(pure)s(p4, i1, descr=vdescr)
%(comment)s stm_read(p4)
+ $DUMMYALLOC
jump(p2)
""" % d, uxdescr=uxdescr, vdescr=vdescr)
@@ -1195,24 +1232,20 @@
self.check_rewrite("""
[p1, p2]
setfield_gc(p1, p2, descr=tzdescr) {50}
- jump()
""", """
[p1, p2]
cond_call_gc_wb(p1, descr=wbdescr) {50}
setfield_gc(p1, p2, descr=tzdescr) {50}
- jump()
""")
def test_stm_location_2(self):
self.check_rewrite("""
[i1]
i3 = getfield_raw(i1, descr=tydescr) {52}
- jump(i3)
""", """
[i1]
$INEV {52}
i3 = getfield_raw(i1, descr=tydescr) {52}
- jump(i3)
""")
def test_stm_location_3(self):
@@ -1240,8 +1273,8 @@
jump(i1)
""", """
[]
- p99 = call_malloc_nursery(16)
i1 = stm_should_break_transaction()
+ $DUMMYALLOC
jump(i1)
""")
@@ -1267,9 +1300,9 @@
jump(i1, i2)
""", """
[]
- p99 = call_malloc_nursery(16)
i1 = stm_should_break_transaction()
i2 = stm_should_break_transaction()
+ $DUMMYALLOC
jump(i1, i2)
""")
@@ -1285,7 +1318,7 @@
p2 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr)
label()
- p99 = call_malloc_nursery(16)
i1 = stm_should_break_transaction()
+ $DUMMYALLOC
jump(i1)
""")
From noreply at buildbot.pypy.org Sun Aug 17 20:38:49 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 17 Aug 2014 20:38:49 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix the remaining tests from
llsupport/test/
Message-ID: <20140817183849.0F0681C332E@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72854:be297a6d9b9d
Date: 2014-08-17 20:38 +0200
http://bitbucket.org/pypy/pypy/changeset/be297a6d9b9d/
Log: Fix the remaining tests from llsupport/test/
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -74,7 +74,7 @@
self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery
else:
self.gc_minimal_size_in_nursery = 0
- if hasattr(gc_ll_descr, 'gcheaderbuilder'):
+ if getattr(gc_ll_descr, 'gcheaderbuilder', None) is not None:
self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header
else:
self.gc_size_of_header = WORD # for tests
diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py
--- a/rpython/jit/backend/llsupport/regalloc.py
+++ b/rpython/jit/backend/llsupport/regalloc.py
@@ -650,8 +650,7 @@
def can_merge_with_next_guard(self, op, i, operations):
if (op.getopnum() == rop.CALL_MAY_FORCE or
op.getopnum() == rop.CALL_ASSEMBLER or
- op.getopnum() == rop.CALL_RELEASE_GIL or
- op.getopnum() == rop.STM_TRANSACTION_BREAK):
+ op.getopnum() == rop.CALL_RELEASE_GIL):
assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED
return True
if (not op.is_comparison() and
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -263,7 +263,7 @@
mallocs. (For all I know this latter case never occurs in
practice, but better safe than sorry.)
"""
- if self.gc_ll_descr.fielddescr_tid is not None:
+ if self.gc_ll_descr.fielddescr_tid is not None: # framework GC
assert (size & (WORD-1)) == 0, "size not aligned?"
addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize')
args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)]
diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py
--- a/rpython/jit/backend/llsupport/test/test_gc.py
+++ b/rpython/jit/backend/llsupport/test/test_gc.py
@@ -184,7 +184,7 @@
rewriter = GcRewriterAssembler(gc_ll_descr, None)
newops = rewriter.newops
v_base = BoxPtr()
- rewriter.gen_write_barrier(v_base)
+ rewriter.gen_write_barrier(v_base, stm_location=None)
assert llop1.record == []
assert len(newops) == 1
assert newops[0].getopnum() == rop.COND_CALL_GC_WB
diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py
--- a/rpython/jit/backend/llsupport/test/test_gc_integration.py
+++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py
@@ -10,7 +10,7 @@
GcLLDescr_framework, GcCache, JitFrameDescrs
from rpython.jit.backend.detect_cpu import getcpuclass
from rpython.jit.backend.llsupport.symbolic import WORD
-from rpython.jit.backend.llsupport import jitframe
+from rpython.jit.backend.llsupport import jitframe, gcmap
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper.annlowlevel import llhelper, llhelper_args
@@ -315,11 +315,13 @@
def test_malloc_slowpath(self):
def check(frame):
- expected_size = 1
+ # xxx for now we always have GCMAP_STM_LOCATION, but it should
+ # be added only if we really have stm in the first place
+ expected_size = 1 + gcmap.GCMAP_STM_LOCATION
idx = 0
if self.cpu.backend_name.startswith('arm'):
# jitframe fixed part is larger here
- expected_size = 2
+ expected_size = 2 + gcmap.GCMAP_STM_LOCATION
idx = 1
assert len(frame.jf_gcmap) == expected_size
if self.cpu.IS_64_BIT:
@@ -355,11 +357,11 @@
def check(frame):
x = frame.jf_gcmap
if self.cpu.IS_64_BIT:
- assert len(x) == 1
+ assert len(x) == 1 + gcmap.GCMAP_STM_LOCATION
assert (bin(x[0]).count('1') ==
'0b1111100000000000000001111111011110'.count('1'))
else:
- assert len(x) == 2
+ assert len(x) == 2 + gcmap.GCMAP_STM_LOCATION
s = bin(x[0]).count('1') + bin(x[1]).count('1')
assert s == 16
# all but two registers + some stuff on stack
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -915,7 +915,16 @@
self.mc.MOV(self.heap_shadowstack_top(), ecx)
else:
# SUB [rootstacktop], WORD
- self.mc.SUB(self.heap_shadowstack_top(), WORD)
+ gcrootmap = self.cpu.gc_ll_descr.gcrootmap
+ rst = gcrootmap.get_root_stack_top_addr()
+ if rx86.fits_in_32bits(rst):
+ # SUB [rootstacktop], WORD
+ self.mc.SUB_ji8((self.SEGMENT_NO, rst), WORD)
+ else:
+ # MOV ebx, rootstacktop
+ # SUB [ebx], WORD
+ self.mc.MOV_ri(ebx.value, rst)
+ self.mc.SUB_mi8((self.SEGMENT_NO, ebx.value, 0), WORD)
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
# some minimal sanity checking
@@ -2616,7 +2625,7 @@
else:
self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't"
- def genop_guard_stm_transaction_break(self, op, guard_op, guard_token,
+ def XXXgenop_guard_stm_transaction_break(self, op, guard_op, guard_token,
arglocs, result_loc):
assert self.cpu.gc_ll_descr.stm
if not we_are_translated():
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -1292,7 +1292,7 @@
need_lower_byte=True)
self.perform(op, [], resloc)
- def consider_stm_transaction_break(self, op, guard_op):
+ def XXXconsider_stm_transaction_break(self, op, guard_op):
self.perform_with_guard(op, guard_op, [], None)
def consider_jump(self, op):
@@ -1444,7 +1444,6 @@
or num == rop.CALL_MAY_FORCE
or num == rop.CALL_ASSEMBLER
or num == rop.CALL_RELEASE_GIL
- or num == rop.STM_TRANSACTION_BREAK
or num == rop.STM_SHOULD_BREAK_TRANSACTION):
oplist_with_guard[num] = value
oplist[num] = add_none_argument(value)
From noreply at buildbot.pypy.org Sun Aug 17 22:43:40 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Sun, 17 Aug 2014 22:43:40 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: fix api to accept {&func1, &func2},
probably could be cleaner.
Message-ID: <20140817204340.3CA5D1C347F@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72855:2f354c411ced
Date: 2014-08-17 23:42 +0300
http://bitbucket.org/pypy/pypy/changeset/2f354c411ced/
Log: fix api to accept {&func1, &func2}, probably could be cleaner.
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -295,9 +295,8 @@
GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp,
rffi.VOIDP], lltype.Void)
gufunctype = lltype.Ptr(GenericUfunc)
-# XXX the signature is wrong, it should be an array of gufunctype, but
-# XXX rffi.CArrayPtr(gufunctype) does not seem to work ???
- at cpython_api([gufunctype, rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
+# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, why???
+ at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t,
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t,
rffi.CCHARP], PyObject)
def _PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes,
@@ -305,8 +304,7 @@
funcs_w = [None] * ntypes
dtypes_w = [None] * ntypes * (nin + nout)
for i in range(ntypes):
- # XXX this should be 'funcs[i]' not 'funcs'
- funcs_w[i] = W_GenericUFuncCaller(funcs)
+ funcs_w[i] = W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]))
for i in range(ntypes*(nin+nout)):
dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])]
w_funcs = space.newlist(funcs_w)
diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py
--- a/pypy/module/cpyext/test/test_ndarrayobject.py
+++ b/pypy/module/cpyext/test/test_ndarrayobject.py
@@ -324,10 +324,10 @@
PyObject * retval;
/* XXX should be 'funcs', not 'funcs[1]' but how to define an array of
function pointers in ndarrayobject.py? */
- retval = _PyUFunc_FromFuncAndDataAndSignature(funcs[1],
+ printf("calling w/funcs[0] = 0x%x, funcs[1] = 0x%x \\n", funcs[0], funcs[1]);
+ retval = _PyUFunc_FromFuncAndDataAndSignature(funcs,
array_data, types, 2, 1, 1, PyUFunc_None,
"times2", "times2_docstring", 0, "()->()");
- Py_INCREF(retval);
return retval;
"""
),
From noreply at buildbot.pypy.org Mon Aug 18 00:10:06 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Mon, 18 Aug 2014 00:10:06 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: silence a non-fatal build error on
windows
Message-ID: <20140817221006.D19EA1C059C@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72856:a7730d9255c3
Date: 2014-08-18 00:30 +0300
http://bitbucket.org/pypy/pypy/changeset/a7730d9255c3/
Log: silence a non-fatal build error on windows
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -306,9 +306,10 @@
return PyPyJitPolicy(pypy_hooks)
def get_entry_point(self, config):
- from pypy.tool.lib_pypy import import_from_lib_pypy
- rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild')
- rebuild.try_rebuild()
+ if sys.platform != 'win32':
+ from pypy.tool.lib_pypy import import_from_lib_pypy
+ rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild')
+ rebuild.try_rebuild()
space = make_objspace(config)
From noreply at buildbot.pypy.org Mon Aug 18 00:10:08 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Mon, 18 Aug 2014 00:10:08 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: translation fixes
Message-ID: <20140817221008.0BA781C059C@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72857:28e42d741c8a
Date: 2014-08-18 01:09 +0300
http://bitbucket.org/pypy/pypy/changeset/28e42d741c8a/
Log: translation fixes
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -312,6 +312,9 @@
w_signature = rffi.charp2str(signature)
w_doc = rffi.charp2str(doc)
w_name = rffi.charp2str(name)
- ufunc_generic = ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes,
- w_signature, identity, w_name, w_doc)
+ w_nin = int(nin)
+ w_nout = int(nout)
+ w_identity = space.wrap(identity)
+ ufunc_generic = ufuncs.frompyfunc(space, w_funcs, w_nin, w_nout, w_dtypes,
+ w_signature, w_identity, w_name, w_doc)
return ufunc_generic
From noreply at buildbot.pypy.org Mon Aug 18 01:05:41 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Mon, 18 Aug 2014 01:05:41 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes3: add missing name attribute for
MD5Type
Message-ID: <20140817230541.846C71C33E2@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes3
Changeset: r72858:18c94a38e38e
Date: 2014-08-17 16:56 +0200
http://bitbucket.org/pypy/pypy/changeset/18c94a38e38e/
Log: add missing name attribute for MD5Type
diff --git a/pypy/module/_md5/interp_md5.py b/pypy/module/_md5/interp_md5.py
--- a/pypy/module/_md5/interp_md5.py
+++ b/pypy/module/_md5/interp_md5.py
@@ -52,6 +52,7 @@
copy = interp2app(W_MD5.copy_w),
digest_size = 16,
block_size = 64,
+ name = 'md5',
__doc__ = """md5(arg) -> return new md5 object.
If arg is present, the method call update(arg) is made.""")
diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py
--- a/pypy/module/_md5/test/test_md5.py
+++ b/pypy/module/_md5/test/test_md5.py
@@ -19,6 +19,12 @@
""")
+ def test_name(self):
+ """
+ md5.name should be 'md5'.
+ """
+ assert self.md5.md5().name == 'md5'
+
def test_digest_size(self):
"""
md5.digest_size should be 16.
From noreply at buildbot.pypy.org Mon Aug 18 01:05:42 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Mon, 18 Aug 2014 01:05:42 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes3: fix unbound variable
Message-ID: <20140817230542.B568D1C33E2@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes3
Changeset: r72859:7bdae69fd02e
Date: 2014-08-17 18:03 +0200
http://bitbucket.org/pypy/pypy/changeset/7bdae69fd02e/
Log: fix unbound variable
diff --git a/lib-python/3/test/test_hashlib.py b/lib-python/3/test/test_hashlib.py
--- a/lib-python/3/test/test_hashlib.py
+++ b/lib-python/3/test/test_hashlib.py
@@ -142,7 +142,7 @@
def test_hexdigest(self):
for cons in self.hash_constructors:
h = cons()
- assert isinstance(h.digest(), bytes), name
+ assert isinstance(h.digest(), bytes), cons.__name__
self.assertEqual(hexstr(h.digest()), h.hexdigest())
def test_large_update(self):
From noreply at buildbot.pypy.org Mon Aug 18 01:05:44 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Mon, 18 Aug 2014 01:05:44 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes3: port _sha256.py to py3
Message-ID: <20140817230544.0539A1C33E2@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes3
Changeset: r72860:27a13b5357da
Date: 2014-08-17 19:49 +0200
http://bitbucket.org/pypy/pypy/changeset/27a13b5357da/
Log: port _sha256.py to py3
diff --git a/lib_pypy/_sha256.py b/lib_pypy/_sha256.py
--- a/lib_pypy/_sha256.py
+++ b/lib_pypy/_sha256.py
@@ -201,7 +201,7 @@
dig = []
for i in sha_info['digest']:
dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
- return ''.join([chr(i) for i in dig])
+ return bytes(dig)
class sha256(object):
digest_size = digestsize = SHA_DIGESTSIZE
@@ -219,7 +219,7 @@
return sha_final(self._sha.copy())[:self._sha['digestsize']]
def hexdigest(self):
- return ''.join(['%.2x' % ord(i) for i in self.digest()])
+ return ''.join(['%.2x' % i for i in self.digest()])
def copy(self):
new = sha256.__new__(sha256)
@@ -240,7 +240,7 @@
return new
def test():
- a_str = "just a test string"
+ a_str = b"just a test string"
assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest()
assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest()
From noreply at buildbot.pypy.org Mon Aug 18 01:05:45 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Mon, 18 Aug 2014 01:05:45 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes3: port _sha512.py module test to py3
Message-ID: <20140817230545.4654A1C33E2@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes3
Changeset: r72861:9852a31d49d9
Date: 2014-08-17 19:50 +0200
http://bitbucket.org/pypy/pypy/changeset/9852a31d49d9/
Log: port _sha512.py module test to py3
diff --git a/lib_pypy/_sha512.py b/lib_pypy/_sha512.py
--- a/lib_pypy/_sha512.py
+++ b/lib_pypy/_sha512.py
@@ -270,7 +270,7 @@
def test():
import _sha512
- a_str = "just a test string"
+ a_str = b"just a test string"
assert _sha512.sha512().hexdigest() == sha512().hexdigest()
assert _sha512.sha512(a_str).hexdigest() == sha512(a_str).hexdigest()
From noreply at buildbot.pypy.org Mon Aug 18 01:05:46 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Mon, 18 Aug 2014 01:05:46 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes3: add .name attribute for all
instances of shaXXX classes
Message-ID: <20140817230546.8E0981C33E2@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes3
Changeset: r72862:98b3c0bab1fe
Date: 2014-08-17 20:14 +0200
http://bitbucket.org/pypy/pypy/changeset/98b3c0bab1fe/
Log: add .name attribute for all instances of shaXXX classes
diff --git a/lib_pypy/_sha1.py b/lib_pypy/_sha1.py
--- a/lib_pypy/_sha1.py
+++ b/lib_pypy/_sha1.py
@@ -123,6 +123,8 @@
def __init__(self):
"Initialisation."
+ self.name = 'sha'
+
# Initial message length in bits(!).
self.length = 0
self.count = [0, 0]
@@ -349,6 +351,7 @@
"""
crypto = sha()
+ crypto.name = 'sha1'
if arg:
crypto.update(arg)
diff --git a/lib_pypy/_sha256.py b/lib_pypy/_sha256.py
--- a/lib_pypy/_sha256.py
+++ b/lib_pypy/_sha256.py
@@ -208,6 +208,7 @@
block_size = SHA_BLOCKSIZE
def __init__(self, s=None):
+ self.name = 'sha256'
self._sha = sha_init()
if s:
sha_update(self._sha, s)
@@ -230,6 +231,7 @@
digest_size = digestsize = 28
def __init__(self, s=None):
+ self.name = 'sha224'
self._sha = sha224_init()
if s:
sha_update(self._sha, s)
diff --git a/lib_pypy/_sha512.py b/lib_pypy/_sha512.py
--- a/lib_pypy/_sha512.py
+++ b/lib_pypy/_sha512.py
@@ -236,6 +236,7 @@
block_size = SHA_BLOCKSIZE
def __init__(self, s=None):
+ self.name = 'sha512'
self._sha = sha_init()
if s:
sha_update(self._sha, s)
@@ -258,6 +259,7 @@
digest_size = digestsize = 48
def __init__(self, s=None):
+ self.name = 'sha384'
self._sha = sha384_init()
if s:
sha_update(self._sha, s)
From noreply at buildbot.pypy.org Mon Aug 18 01:05:47 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Mon, 18 Aug 2014 01:05:47 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes3: add tests for .name attribute on
sha objects
Message-ID: <20140817230547.B3C8A1C33E2@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes3
Changeset: r72863:51ec894e3d52
Date: 2014-08-17 20:17 +0200
http://bitbucket.org/pypy/pypy/changeset/51ec894e3d52/
Log: add tests for .name attribute on sha objects
diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py
--- a/pypy/module/test_lib_pypy/test_sha_extra.py
+++ b/pypy/module/test_lib_pypy/test_sha_extra.py
@@ -37,3 +37,30 @@
assert _sha.sha1().digest_size == 20
assert _sha.sha1().digestsize == 20
assert _sha.sha1().block_size == 64
+
+ assert _sha.sha().name == 'sha'
+ assert _sha.sha1().name == 'sha1'
+
+
+class AppTestSHA256:
+ spaceconfig = dict(usemodules=('struct',))
+
+ def setup_class(cls):
+ cls.w__sha256 = import_lib_pypy(cls.space, '_sha256')
+
+ def test_attributes(self):
+ _sha256 = self._sha256
+ assert _sha256.sha224().name == 'sha224'
+ assert _sha256.sha256().name == 'sha256'
+
+
+class AppTestSHA512:
+ spaceconfig = dict(usemodules=('struct',))
+
+ def setup_class(cls):
+ cls.w__sha512 = import_lib_pypy(cls.space, '_sha512')
+
+ def test_attributes(self):
+ _sha512 = self._sha512
+ assert _sha512.sha384().name == 'sha384'
+ assert _sha512.sha512().name == 'sha512'
From noreply at buildbot.pypy.org Mon Aug 18 01:05:49 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Mon, 18 Aug 2014 01:05:49 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes3
(pull request #270)
Message-ID: <20140817230549.01B711C33E2@cobra.cs.uni-duesseldorf.de>
Author: Philip Jenvey
Branch: py3.3
Changeset: r72864:0c8f0c10188c
Date: 2014-08-17 16:05 -0700
http://bitbucket.org/pypy/pypy/changeset/0c8f0c10188c/
Log: Merged in numerodix/pypy/py3.3-fixes3 (pull request #270)
py3.3: fixes for failing hashlib tests
diff --git a/lib-python/3/test/test_hashlib.py b/lib-python/3/test/test_hashlib.py
--- a/lib-python/3/test/test_hashlib.py
+++ b/lib-python/3/test/test_hashlib.py
@@ -142,7 +142,7 @@
def test_hexdigest(self):
for cons in self.hash_constructors:
h = cons()
- assert isinstance(h.digest(), bytes), name
+ assert isinstance(h.digest(), bytes), cons.__name__
self.assertEqual(hexstr(h.digest()), h.hexdigest())
def test_large_update(self):
diff --git a/lib_pypy/_sha1.py b/lib_pypy/_sha1.py
--- a/lib_pypy/_sha1.py
+++ b/lib_pypy/_sha1.py
@@ -123,6 +123,8 @@
def __init__(self):
"Initialisation."
+ self.name = 'sha'
+
# Initial message length in bits(!).
self.length = 0
self.count = [0, 0]
@@ -349,6 +351,7 @@
"""
crypto = sha()
+ crypto.name = 'sha1'
if arg:
crypto.update(arg)
diff --git a/lib_pypy/_sha256.py b/lib_pypy/_sha256.py
--- a/lib_pypy/_sha256.py
+++ b/lib_pypy/_sha256.py
@@ -201,13 +201,14 @@
dig = []
for i in sha_info['digest']:
dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
- return ''.join([chr(i) for i in dig])
+ return bytes(dig)
class sha256(object):
digest_size = digestsize = SHA_DIGESTSIZE
block_size = SHA_BLOCKSIZE
def __init__(self, s=None):
+ self.name = 'sha256'
self._sha = sha_init()
if s:
sha_update(self._sha, s)
@@ -219,7 +220,7 @@
return sha_final(self._sha.copy())[:self._sha['digestsize']]
def hexdigest(self):
- return ''.join(['%.2x' % ord(i) for i in self.digest()])
+ return ''.join(['%.2x' % i for i in self.digest()])
def copy(self):
new = sha256.__new__(sha256)
@@ -230,6 +231,7 @@
digest_size = digestsize = 28
def __init__(self, s=None):
+ self.name = 'sha224'
self._sha = sha224_init()
if s:
sha_update(self._sha, s)
@@ -240,7 +242,7 @@
return new
def test():
- a_str = "just a test string"
+ a_str = b"just a test string"
assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest()
assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest()
diff --git a/lib_pypy/_sha512.py b/lib_pypy/_sha512.py
--- a/lib_pypy/_sha512.py
+++ b/lib_pypy/_sha512.py
@@ -236,6 +236,7 @@
block_size = SHA_BLOCKSIZE
def __init__(self, s=None):
+ self.name = 'sha512'
self._sha = sha_init()
if s:
sha_update(self._sha, s)
@@ -258,6 +259,7 @@
digest_size = digestsize = 48
def __init__(self, s=None):
+ self.name = 'sha384'
self._sha = sha384_init()
if s:
sha_update(self._sha, s)
@@ -270,7 +272,7 @@
def test():
import _sha512
- a_str = "just a test string"
+ a_str = b"just a test string"
assert _sha512.sha512().hexdigest() == sha512().hexdigest()
assert _sha512.sha512(a_str).hexdigest() == sha512(a_str).hexdigest()
diff --git a/pypy/module/_md5/interp_md5.py b/pypy/module/_md5/interp_md5.py
--- a/pypy/module/_md5/interp_md5.py
+++ b/pypy/module/_md5/interp_md5.py
@@ -52,6 +52,7 @@
copy = interp2app(W_MD5.copy_w),
digest_size = 16,
block_size = 64,
+ name = 'md5',
__doc__ = """md5(arg) -> return new md5 object.
If arg is present, the method call update(arg) is made.""")
diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py
--- a/pypy/module/_md5/test/test_md5.py
+++ b/pypy/module/_md5/test/test_md5.py
@@ -19,6 +19,12 @@
""")
+ def test_name(self):
+ """
+ md5.name should be 'md5'.
+ """
+ assert self.md5.md5().name == 'md5'
+
def test_digest_size(self):
"""
md5.digest_size should be 16.
diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py
--- a/pypy/module/test_lib_pypy/test_sha_extra.py
+++ b/pypy/module/test_lib_pypy/test_sha_extra.py
@@ -37,3 +37,30 @@
assert _sha.sha1().digest_size == 20
assert _sha.sha1().digestsize == 20
assert _sha.sha1().block_size == 64
+
+ assert _sha.sha().name == 'sha'
+ assert _sha.sha1().name == 'sha1'
+
+
+class AppTestSHA256:
+ spaceconfig = dict(usemodules=('struct',))
+
+ def setup_class(cls):
+ cls.w__sha256 = import_lib_pypy(cls.space, '_sha256')
+
+ def test_attributes(self):
+ _sha256 = self._sha256
+ assert _sha256.sha224().name == 'sha224'
+ assert _sha256.sha256().name == 'sha256'
+
+
+class AppTestSHA512:
+ spaceconfig = dict(usemodules=('struct',))
+
+ def setup_class(cls):
+ cls.w__sha512 = import_lib_pypy(cls.space, '_sha512')
+
+ def test_attributes(self):
+ _sha512 = self._sha512
+ assert _sha512.sha384().name == 'sha384'
+ assert _sha512.sha512().name == 'sha512'
From noreply at buildbot.pypy.org Mon Aug 18 06:58:02 2014
From: noreply at buildbot.pypy.org (mattip)
Date: Mon, 18 Aug 2014 06:58:02 +0200 (CEST)
Subject: [pypy-commit] pypy ufuncapi: fix 'data' and 'identity' handling
Message-ID: <20140818045802.44FD01C347F@cobra.cs.uni-duesseldorf.de>
Author: mattip
Branch: ufuncapi
Changeset: r72865:481393e4eca5
Date: 2014-08-18 07:57 +0300
http://bitbucket.org/pypy/pypy/changeset/481393e4eca5/
Log: fix 'data' and 'identity' handling
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -264,15 +264,15 @@
CCHARP_SIZE = _get_bitsize('P') / 8
class W_GenericUFuncCaller(W_Root):
- def __init__(self, func):
+ def __init__(self, func, data):
self.func = func
+ self.data = data
def descr_call(self, space, __args__):
args_w, kwds_w = __args__.unpack()
dataps = alloc_raw_storage(CCHARP_SIZE * len(args_w), track_allocation=False)
dims = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False)
steps = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False)
- user_data = None
for i in range(len(args_w)):
arg_i = args_w[i]
assert isinstance(arg_i, W_NDimArray)
@@ -282,7 +282,7 @@
raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_dtype().elsize))
try:
self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps),
- rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data)
+ rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), self.data)
finally:
free_raw_storage(dataps, track_allocation=False)
free_raw_storage(dims, track_allocation=False)
@@ -304,7 +304,7 @@
funcs_w = [None] * ntypes
dtypes_w = [None] * ntypes * (nin + nout)
for i in range(ntypes):
- funcs_w[i] = W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]))
+ funcs_w[i] = W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data)
for i in range(ntypes*(nin+nout)):
dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])]
w_funcs = space.newlist(funcs_w)
@@ -312,9 +312,7 @@
w_signature = rffi.charp2str(signature)
w_doc = rffi.charp2str(doc)
w_name = rffi.charp2str(name)
- w_nin = int(nin)
- w_nout = int(nout)
w_identity = space.wrap(identity)
- ufunc_generic = ufuncs.frompyfunc(space, w_funcs, w_nin, w_nout, w_dtypes,
+ ufunc_generic = ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes,
w_signature, w_identity, w_name, w_doc)
return ufunc_generic
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -1018,9 +1018,12 @@
if space.is_none(w_identity):
identity = None
- else:
+ elif space.isinstance_w(w_identity, space.w_int):
identity = \
- descriptor.get_dtype_cache(space).w_longdtype.box(w_identity)
+ descriptor.get_dtype_cache(space).w_longdtype.box(space.int_w(w_identity))
+ else:
+ raise oefmt(space.w_ValueError,
+ 'identity must be None or an int')
w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, signature,
match_dtypes=match_dtypes)
From noreply at buildbot.pypy.org Mon Aug 18 09:55:18 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Mon, 18 Aug 2014 09:55:18 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: create a new
AddressStack only if needed
Message-ID: <20140818075518.53D481C34DB@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72866:e9322b528a2c
Date: 2014-08-18 09:54 +0200
http://bitbucket.org/pypy/pypy/changeset/e9322b528a2c/
Log: create a new AddressStack only if needed
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2124,12 +2124,13 @@
#
# get rid of objects pointing to pinned objects that were not
# visited
- new_old_objects_pointing_to_pinned = self.AddressStack()
- self.old_objects_pointing_to_pinned.foreach(
- self._sweep_old_objects_pointing_to_pinned,
- new_old_objects_pointing_to_pinned)
- self.old_objects_pointing_to_pinned.delete()
- self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned
+ if self.old_objects_pointing_to_pinned.non_empty():
+ new_old_objects_pointing_to_pinned = self.AddressStack()
+ self.old_objects_pointing_to_pinned.foreach(
+ self._sweep_old_objects_pointing_to_pinned,
+ new_old_objects_pointing_to_pinned)
+ self.old_objects_pointing_to_pinned.delete()
+ self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned
self.gc_state = STATE_SWEEPING
#END MARKING
elif self.gc_state == STATE_SWEEPING:
From noreply at buildbot.pypy.org Mon Aug 18 10:42:18 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 10:42:18 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: import stmgc/3bfb99304c6d
Message-ID: <20140818084218.47DC21C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72867:a674757895ed
Date: 2014-08-18 10:11 +0200
http://bitbucket.org/pypy/pypy/changeset/a674757895ed/
Log: import stmgc/3bfb99304c6d
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-29376f500349
+e85ce411f190
diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.c b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c
--- a/rpython/translator/stm/src_stm/stm/rewind_setjmp.c
+++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c
@@ -38,8 +38,17 @@
size_t stack_size, ssstack_size;
assert(rjthread->head != NULL);
- stop = rjthread->head->frame_base;
ssstop = rjthread->head->shadowstack_base;
+ if (((long)ssstop) & 1) {
+ /* PyPy's JIT: 'head->frame_base' is missing; use directly 'head',
+ which should be at the end of the frame (and doesn't need itself
+ to be copied because it contains immutable data only) */
+ ssstop = ((char *)ssstop) - 1;
+ stop = (char *)rjthread->head;
+ }
+ else {
+ stop = rjthread->head->frame_base;
+ }
assert(stop >= base);
assert(ssstop <= ssbase);
stack_size = stop - base;
diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h
--- a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h
+++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h
@@ -54,9 +54,12 @@
************************************************************/
typedef struct _rewind_jmp_buf {
- char *frame_base;
char *shadowstack_base;
struct _rewind_jmp_buf *prev;
+ char *frame_base;
+ /* NB: PyPy's JIT has got details of this structure hard-coded,
+ as follows: it uses 2 words only (so frame_base is invalid)
+ and sets the lowest bit of 'shadowstack_base' to tell this */
} rewind_jmp_buf;
typedef struct {
@@ -72,6 +75,7 @@
/* remember the current stack and ss_stack positions */
#define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \
+ assert((((long)(ss)) & 1) == 0); \
(rjbuf)->frame_base = __builtin_frame_address(0); \
(rjbuf)->shadowstack_base = (char *)(ss); \
(rjbuf)->prev = (rjthread)->head; \
From noreply at buildbot.pypy.org Mon Aug 18 10:42:19 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 10:42:19 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: in-progress
Message-ID: <20140818084219.87BC51C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72868:8ff5b23d8b84
Date: 2014-08-18 10:41 +0200
http://bitbucket.org/pypy/pypy/changeset/8ff5b23d8b84/
Log: in-progress
diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py
--- a/rpython/jit/backend/x86/arch.py
+++ b/rpython/jit/backend/x86/arch.py
@@ -16,7 +16,7 @@
# +--------------------+ <== aligned to 16 bytes
# | return address |
# +--------------------+ ------------------------.
-# | resume buf (if STM)| STM_FRAME_FIXED_SIZE |
+# | rewind_jmp_buf(STM)| STM_FRAME_FIXED_SIZE |
# +--------------------+ ----------------------. |
# | saved regs | FRAME_FIXED_SIZE | |
# +--------------------+ --------------------. | |
@@ -46,18 +46,9 @@
assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3
-# The STM resume buffer (on x86-64) is four words wide. Actually, clang
-# uses three words (see test_stm.py): rbp, rip, rsp. But the value of
-# rbp is not interesting for the JIT-generated machine code. So the
-# STM_JMPBUF_OFS is the offset from the stack top to the start of the
-# buffer, with only words at offset +1 and +2 in this buffer being
-# meaningful. We use ebp, i.e. the word at offset +0, to store the
-# resume counter.
-
-STM_RESUME_BUF_WORDS = 4
-STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_RESUME_BUF_WORDS
-STM_JMPBUF_OFS = WORD * FRAME_FIXED_SIZE
-STM_JMPBUF_OFS_RBP = STM_JMPBUF_OFS + 0 * WORD
-STM_JMPBUF_OFS_RIP = STM_JMPBUF_OFS + 1 * WORD
-STM_JMPBUF_OFS_RSP = STM_JMPBUF_OFS + 2 * WORD
-STM_OLD_SHADOWSTACK = STM_JMPBUF_OFS + 3 * WORD
+# The STM rewind_jmp_buf (on x86-64) is two words wide:
+STM_REWIND_JMP_BUF_WORDS = 2
+STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_REWIND_JMP_BUF_WORDS
+STM_JMPBUF_OFS = WORD * FRAME_FIXED_SIZE
+STM_SHADOWSTACK_BASE_OFS = STM_JMPBUF_OFS + 0 * WORD
+STM_PREV_OFS = STM_JMPBUF_OFS + 1 * WORD
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -19,8 +19,7 @@
from rpython.jit.backend.x86.arch import (
FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32,
PASS_ON_MY_FRAME, STM_FRAME_FIXED_SIZE, STM_JMPBUF_OFS,
- STM_JMPBUF_OFS_RIP, STM_JMPBUF_OFS_RSP, STM_JMPBUF_OFS_RBP,
- STM_OLD_SHADOWSTACK)
+ STM_SHADOWSTACK_BASE_OFS, STM_PREV_OFS)
from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi,
r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG,
@@ -886,45 +885,83 @@
gcrootmap = self.cpu.gc_ll_descr.gcrootmap
return self.heap_tl(gcrootmap.get_root_stack_top_addr())
+ def heap_rjthread(self):
+ """STM: Return an AddressLoc for '&stm_thread_local.rjthread'."""
+ return self.heap_tl(rstm.adr_rjthread)
+
+ def heap_rjthread_head(self):
+ """STM: Return an AddressLoc for '&stm_thread_local.rjthread.head'."""
+ return self.heap_tl(rstm.adr_rjthread_head)
+
+ def heap_rjthread_moved_off_base(self):
+ """STM: AddressLoc for '&stm_thread_local.rjthread.moved_off_base'."""
+ return self.heap_tl(rstm.adr_rjthread_moved_off_base)
+
def _call_header_shadowstack(self):
# put the frame in ebp on the shadowstack for the GC to find
# (ebp is a writeable object and does not need a write-barrier
# again (ensured by the code calling the loop))
- self.mc.MOV(ebx, self.heap_shadowstack_top())
+ mc = self.mc
+ mc.MOV(ebx, self.heap_shadowstack_top())
+ mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value)
+ # MOV [ebx], ebp
if self.cpu.gc_ll_descr.stm:
- self.mc.MOV_mi((self.SEGMENT_NO, ebx.value, 0),
- rstm.stm_stack_marker_new) # MOV [ebx], MARKER_NEW
- self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, WORD),
- ebp.value) # MOV [ebx+WORD], ebp
- self.mc.MOV_sr(STM_OLD_SHADOWSTACK, ebx.value)
- # MOV [esp+xx], ebx
- self.mc.ADD_ri(ebx.value, 2 * WORD)
+ # inlining stm_rewind_jmp_enterframe()
+ r11v = X86_64_SCRATCH_REG.value
+ rjh = self.heap_rjthread_head()
+ mc.ADD_ri8(ebx.value, 1) # ADD ebx, 1
+ mc.MOV_rm(r11v, rjh) # MOV r11, [rjthread.head]
+ mc.MOV_sr(STM_SHADOWSTACK_BASE_OFS, ebx.value)
+ # MOV [esp+ssbase], ebx
+ mc.ADD_ri8(ebx.value, WORD-1) # ADD ebx, 7
+ mc.MOV_sr(STM_PREV_OFS, r11v) # MOV [esp+prev], r11
+ mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx
+ mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs]
+ mc.MOV_mr(rjh, r11v) # MOV [rjthread.head], r11
+ #
else:
- self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0),
- ebp.value) # MOV [ebx], ebp
- self.mc.ADD_ri(ebx.value, WORD)
- self.mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx
+ mc.ADD_ri(ebx.value, WORD) # ADD ebx, WORD
+ mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx
def _call_footer_shadowstack(self):
+ mc = self.mc
if self.cpu.gc_ll_descr.stm:
# STM: in the rare case where we need realloc_frame, the new
# frame is pushed on top of the old one. It's even possible
# that this occurs more than once. So we have to restore
# the old shadowstack by looking up its original saved value.
- self.mc.MOV_rs(ecx.value, STM_OLD_SHADOWSTACK)
- self.mc.MOV(self.heap_shadowstack_top(), ecx)
+ # The rest of this is inlining stm_rewind_jmp_leaveframe().
+ r11v = X86_64_SCRATCH_REG.value
+ rjh = self.heap_rjthread_head()
+ rjmovd_o_b = self.heap_rjthread_moved_off_base()
+ adr_rjthread_moved_off_base
+ mc.MOV_rs(r11v, STM_SHADOWSTACK_BASE_OFS) # MOV r11, [esp+ssbase]
+ mc.MOV_rs(ebx.value, STM_PREV_OFS) # MOV ebx, [esp+prev]
+ mc.MOV(self.heap_shadowstack_top(), r11v) # MOV [rootstacktop], r11
+ mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs]
+ mc.MOV_mr(rjh, ebx.value) # MOV [rjthread.head], ebx
+ mc.CMP_rm(r11v, rjmovd_o_b) # CMP r11, [rjth.movd_o_b]
+ mc.J_il8(rx86.Conditions['NE'], 0) # JNE label_below
+ jne_location = mc.get_relative_pos()
+ #
+ mc.CALL(imm(rstm.adr_pypy__rewind_jmp_copy_stack_slice))
+ #
+ # patch the JNE above
+ offset = mc.get_relative_pos() - jne_location
+ assert 0 < offset <= 127
+ mc.overwrite(jne_location-1, chr(offset))
else:
# SUB [rootstacktop], WORD
gcrootmap = self.cpu.gc_ll_descr.gcrootmap
rst = gcrootmap.get_root_stack_top_addr()
if rx86.fits_in_32bits(rst):
# SUB [rootstacktop], WORD
- self.mc.SUB_ji8((self.SEGMENT_NO, rst), WORD)
+ mc.SUB_ji8((self.SEGMENT_NO, rst), WORD)
else:
# MOV ebx, rootstacktop
# SUB [ebx], WORD
- self.mc.MOV_ri(ebx.value, rst)
- self.mc.SUB_mi8((self.SEGMENT_NO, ebx.value, 0), WORD)
+ mc.MOV_ri(ebx.value, rst)
+ mc.SUB_mi8((self.SEGMENT_NO, ebx.value, 0), WORD)
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
# some minimal sanity checking
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -1292,9 +1292,6 @@
need_lower_byte=True)
self.perform(op, [], resloc)
- def XXXconsider_stm_transaction_break(self, op, guard_op):
- self.perform_with_guard(op, guard_op, [], None)
-
def consider_jump(self, op):
assembler = self.assembler
assert self.jump_target_descr is None
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -13,12 +13,16 @@
TID = rffi.UINT
tid_offset = CFlexSymbolic('offsetof(struct rpyobj_s, tid)')
stm_nb_segments = CFlexSymbolic('STM_NB_SEGMENTS')
-stm_stack_marker_new = CFlexSymbolic('STM_STACK_MARKER_NEW')
-stm_stack_marker_old = CFlexSymbolic('STM_STACK_MARKER_OLD')
adr_nursery_free = CFlexSymbolic('((long)&STM_SEGMENT->nursery_current)')
adr_nursery_top = CFlexSymbolic('((long)&STM_SEGMENT->nursery_end)')
adr_pypy_stm_nursery_low_fill_mark = (
CFlexSymbolic('((long)&pypy_stm_nursery_low_fill_mark)'))
+adr_rjthread = (
+ CFlexSymbolic('((long)&stm_thread_local.rjthread'))
+adr_rjthread_head = (
+ CFlexSymbolic('((long)&stm_thread_local.rjthread.head'))
+adr_rjthread_moved_off_base = (
+ CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base'))
adr_transaction_read_version = (
CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)'))
adr_jmpbuf_ptr = (
@@ -39,6 +43,8 @@
CFlexSymbolic('((long)&stm_commit_transaction)'))
adr_pypy_stm_start_transaction = (
CFlexSymbolic('((long)&pypy_stm_start_transaction)'))
+adr_pypy__rewind_jmp_copy_stack_slice = (
+ CFlexSymbolic('((long)&pypy__rewind_jmp_copy_stack_slice)'))
def rewind_jmp_frame():
diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h
--- a/rpython/translator/stm/src_stm/stmgcintf.h
+++ b/rpython/translator/stm/src_stm/stmgcintf.h
@@ -116,5 +116,10 @@
/* NB. this logic is hard-coded in jit/backend/x86/assembler.py too */
}
+static void pypy__rewind_jmp_copy_stack_slice(void)
+{
+ _rewind_jmp_copy_stack_slice(&stm_thread_local.rjthread);
+}
+
#endif /* _RPY_STMGCINTF_H */
From noreply at buildbot.pypy.org Mon Aug 18 10:42:23 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 10:42:23 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Avoid one word here with the jit
Message-ID: <20140818084223.167091C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1316:e85ce411f190
Date: 2014-08-18 10:10 +0200
http://bitbucket.org/pypy/stmgc/changeset/e85ce411f190/
Log: Avoid one word here with the jit
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c
--- a/c7/stm/rewind_setjmp.c
+++ b/c7/stm/rewind_setjmp.c
@@ -37,8 +37,17 @@
size_t stack_size, ssstack_size;
assert(rjthread->head != NULL);
- stop = rjthread->head->frame_base;
ssstop = rjthread->head->shadowstack_base;
+ if (((long)ssstop) & 1) {
+ /* PyPy's JIT: 'head->frame_base' is missing; use directly 'head',
+ which should be at the end of the frame (and doesn't need itself
+ to be copied because it contains immutable data only) */
+ ssstop = ((char *)ssstop) - 1;
+ stop = (char *)rjthread->head;
+ }
+ else {
+ stop = rjthread->head->frame_base;
+ }
assert(stop >= base);
assert(ssstop <= ssbase);
stack_size = stop - base;
diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h
--- a/c7/stm/rewind_setjmp.h
+++ b/c7/stm/rewind_setjmp.h
@@ -53,9 +53,12 @@
************************************************************/
typedef struct _rewind_jmp_buf {
- char *frame_base;
char *shadowstack_base;
struct _rewind_jmp_buf *prev;
+ char *frame_base;
+ /* NB: PyPy's JIT has got details of this structure hard-coded,
+ as follows: it uses 2 words only (so frame_base is invalid)
+ and sets the lowest bit of 'shadowstack_base' to tell this */
} rewind_jmp_buf;
typedef struct {
@@ -71,6 +74,7 @@
/* remember the current stack and ss_stack positions */
#define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \
+ assert((((long)(ss)) & 1) == 0); \
(rjbuf)->frame_base = __builtin_frame_address(0); \
(rjbuf)->shadowstack_base = (char *)(ss); \
(rjbuf)->prev = (rjthread)->head; \
From noreply at buildbot.pypy.org Mon Aug 18 10:49:00 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 10:49:00 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Kill this code
Message-ID: <20140818084900.3DD151C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72869:f028586da4f9
Date: 2014-08-18 10:48 +0200
http://bitbucket.org/pypy/pypy/changeset/f028586da4f9/
Log: Kill this code
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -2662,78 +2662,6 @@
else:
self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't"
- def XXXgenop_guard_stm_transaction_break(self, op, guard_op, guard_token,
- arglocs, result_loc):
- assert self.cpu.gc_ll_descr.stm
- if not we_are_translated():
- return # tests only
-
- gcmap = self._regalloc.get_gcmap()
- self._store_force_index(guard_op)
-
- mc = self.mc
- self._generate_cmp_break_transaction()
- # use JAE to jump over the following piece of code if we don't need
- # to break the transaction now
- mc.J_il(rx86.Conditions['AE'], 0xfffff) # patched later
- jae_location = mc.get_relative_pos()
-
- # This is the case in which we have to do the same as the logic
- # in pypy_stm_perform_transaction(). We know that we're not in
- # an atomic transaction (otherwise the jump above always triggers).
- # So we only have to do the following three operations:
- # stm_commit_transaction();
- # __builtin_setjmp(jmpbuf);
- # pypy_stm_start_transaction(&jmpbuf);
-
- # save all registers and the gcmap
- self.push_gcmap(mc, gcmap, store=True)
- grp_regs = self._regalloc.rm.reg_bindings.values()
- xmm_regs = self._regalloc.xrm.reg_bindings.values()
- self._push_pop_regs_to_frame(True, mc, grp_regs, xmm_regs)
- #
- # call stm_commit_transaction()
- mc.CALL(imm(rstm.adr_stm_commit_transaction))
- #
- # update the two words in the STM_RESUME_BUF, as described
- # in arch.py. The "learip" pseudo-instruction turns into
- # what is, in gnu as syntax: lea 0(%rip), %rax (the 0 is
- # four bytes, patched just below)
- mc.LEARIP_rl32(eax.value, 0)
- learip_location = mc.get_relative_pos()
- mc.MOV_sr(STM_JMPBUF_OFS_RIP, eax.value)
- mc.MOV_sr(STM_JMPBUF_OFS_RSP, esp.value)
- mc.XOR(ebp, ebp)
- mc.MOV_sr(STM_JMPBUF_OFS_RBP, ebp.value)
- #
- offset = mc.get_relative_pos() - learip_location
- assert 0 < offset <= 127
- mc.overwrite32(learip_location - 4, offset)
- # ** HERE ** is the place an aborted transaction retries
- # (when resuming, ebp is garbage, but the STM_RESUME_BUF is
- # still correct in case of repeated aborting)
- #
- # call pypy_stm_start_transaction(&jmpbuf, &v_counter)
- # where v_counter is abusively stored in the jmpbuf at
- # the location for ebp (so that the value in v_counter
- # is here found in ebp, if we needed it).
- mc.LEA_rs(edi.value, STM_JMPBUF_OFS)
- mc.LEA_rs(esi.value, STM_JMPBUF_OFS_RBP)
- mc.CALL(imm(rstm.adr_pypy_stm_start_transaction))
- #
- # reload ebp with the frame now
- self._reload_frame_if_necessary(self.mc)
- #
- # restore regs
- self._push_pop_regs_to_frame(False, mc, grp_regs, xmm_regs)
- #
- self._emit_guard_not_forced(guard_token)
-
- # patch the JAE above (note that we also skip the guard_not_forced
- # in the common situation where we jump over the code above)
- offset = mc.get_relative_pos() - jae_location
- mc.overwrite32(jae_location - 4, offset)
-
def genop_discard_stm_read(self, op, arglocs):
if not IS_X86_64:
todo() # "needed for X86_64_SCRATCH_REG"
From noreply at buildbot.pypy.org Mon Aug 18 10:51:32 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 10:51:32 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Kill more stuff
Message-ID: <20140818085132.0D3B41C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72870:ebf6e1ab041b
Date: 2014-08-18 10:51 +0200
http://bitbucket.org/pypy/pypy/changeset/ebf6e1ab041b/
Log: Kill more stuff
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -837,35 +837,6 @@
def _call_footer(self):
gcrootmap = self.cpu.gc_ll_descr.gcrootmap
- if self.cpu.gc_ll_descr.stm and we_are_translated():
- # call _pypy_stm_become_inevitable() if the current jmpbuf is set
- # to this frame, because we're about to leave. This is if
- # we called a pypy_stm_start_transaction() earlier.
- assert IS_X86_64
- mc = self.mc
- #
- # load the address of the jmpbuf
- mc.LEA_rs(edi.value, STM_JMPBUF_OFS)
- # compare it with the currently-stored jmpbuf
- mc.CMP_rj(edi.value, (self.SEGMENT_GC, rstm.adr_jmpbuf_ptr))
- # if they differ (or if jmpbuf_ptr is already NULL), nothing to do
- mc.J_il8(rx86.Conditions['NE'], 0) # patched later
- jne_location = mc.get_relative_pos()
- #
- # if they are equal, we need to become inevitable now
- mc.XOR_rr(edi.value, edi.value)
- mc.CALL(imm(rstm.adr__pypy_stm_become_inevitable))
- # there could have been a collection in the call above;
- # reload the frame into ebp (but we don't need to apply the
- # write barrier to it now)
- mc.MOV(ecx, self.heap_shadowstack_top())
- mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ecx.value, -WORD))
- #
- # this is where the JNE above jumps
- offset = mc.get_relative_pos() - jne_location
- assert 0 < offset <= 127
- mc.overwrite(jne_location-1, chr(offset))
-
if gcrootmap and gcrootmap.is_shadow_stack:
self._call_footer_shadowstack()
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -25,8 +25,6 @@
CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base'))
adr_transaction_read_version = (
CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)'))
-adr_jmpbuf_ptr = (
- CFlexSymbolic('((long)&STM_SEGMENT->jmpbuf_ptr)'))
adr_segment_base = (
CFlexSymbolic('((long)&STM_SEGMENT->segment_base)'))
adr_write_slowpath = CFlexSymbolic('((long)&_stm_write_slowpath)')
@@ -37,12 +35,6 @@
CARD_MARKED = CFlexSymbolic('_STM_CARD_MARKED')
CARD_SIZE = CFlexSymbolic('_STM_CARD_SIZE')
-adr__pypy_stm_become_inevitable = (
- CFlexSymbolic('((long)&_pypy_stm_become_inevitable)'))
-adr_stm_commit_transaction = (
- CFlexSymbolic('((long)&stm_commit_transaction)'))
-adr_pypy_stm_start_transaction = (
- CFlexSymbolic('((long)&pypy_stm_start_transaction)'))
adr_pypy__rewind_jmp_copy_stack_slice = (
CFlexSymbolic('((long)&pypy__rewind_jmp_copy_stack_slice)'))
From noreply at buildbot.pypy.org Mon Aug 18 12:07:01 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 12:07:01 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: in-progress
Message-ID: <20140818100701.BDBCE1C0EF5@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72874:a279a16c839d
Date: 2014-08-18 12:06 +0200
http://bitbucket.org/pypy/pypy/changeset/a279a16c839d/
Log: in-progress
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -74,9 +74,9 @@
self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery
else:
self.gc_minimal_size_in_nursery = 0
- if getattr(gc_ll_descr, 'gcheaderbuilder', None) is not None:
+ try:
self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header
- else:
+ except AttributeError:
self.gc_size_of_header = WORD # for tests
self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn)
# building the barriers needs to happen before these:
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -856,10 +856,6 @@
gcrootmap = self.cpu.gc_ll_descr.gcrootmap
return self.heap_tl(gcrootmap.get_root_stack_top_addr())
- def heap_rjthread(self):
- """STM: Return an AddressLoc for '&stm_thread_local.rjthread'."""
- return self.heap_tl(rstm.adr_rjthread)
-
def heap_rjthread_head(self):
"""STM: Return an AddressLoc for '&stm_thread_local.rjthread.head'."""
return self.heap_tl(rstm.adr_rjthread_head)
@@ -878,17 +874,17 @@
# MOV [ebx], ebp
if self.cpu.gc_ll_descr.stm:
# inlining stm_rewind_jmp_enterframe()
- r11v = X86_64_SCRATCH_REG.value
+ r11 = X86_64_SCRATCH_REG
rjh = self.heap_rjthread_head()
- mc.ADD_ri8(ebx.value, 1) # ADD ebx, 1
- mc.MOV_rm(r11v, rjh) # MOV r11, [rjthread.head]
+ mc.ADD_ri(ebx.value, 1) # ADD ebx, 1
+ mc.MOV(r11, rjh) # MOV r11, [rjthread.head]
mc.MOV_sr(STM_SHADOWSTACK_BASE_OFS, ebx.value)
# MOV [esp+ssbase], ebx
- mc.ADD_ri8(ebx.value, WORD-1) # ADD ebx, 7
- mc.MOV_sr(STM_PREV_OFS, r11v) # MOV [esp+prev], r11
+ mc.ADD_ri(ebx.value, WORD-1) # ADD ebx, 7
+ mc.MOV_sr(STM_PREV_OFS, r11.value) # MOV [esp+prev], r11
mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx
- mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs]
- mc.MOV_mr(rjh, r11v) # MOV [rjthread.head], r11
+ mc.LEA_rs(r11.value, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs]
+ mc.MOV(rjh, r11) # MOV [rjthread.head], r11
#
else:
mc.ADD_ri(ebx.value, WORD) # ADD ebx, WORD
@@ -902,16 +898,16 @@
# that this occurs more than once. So we have to restore
# the old shadowstack by looking up its original saved value.
# The rest of this is inlining stm_rewind_jmp_leaveframe().
- r11v = X86_64_SCRATCH_REG.value
+ r11 = X86_64_SCRATCH_REG
rjh = self.heap_rjthread_head()
rjmovd_o_b = self.heap_rjthread_moved_off_base()
- adr_rjthread_moved_off_base
- mc.MOV_rs(r11v, STM_SHADOWSTACK_BASE_OFS) # MOV r11, [esp+ssbase]
- mc.MOV_rs(ebx.value, STM_PREV_OFS) # MOV ebx, [esp+prev]
- mc.MOV(self.heap_shadowstack_top(), r11v) # MOV [rootstacktop], r11
- mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs]
- mc.MOV_mr(rjh, ebx.value) # MOV [rjthread.head], ebx
- mc.CMP_rm(r11v, rjmovd_o_b) # CMP r11, [rjth.movd_o_b]
+ mc.MOV_rs(ebx.value, STM_SHADOWSTACK_BASE_OFS)
+ # MOV ebx, [esp+ssbase]
+ mc.MOV_rs(r11.value, STM_PREV_OFS) # MOV r11, [esp+prev]
+ mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx
+ mc.LEA_rs(ebx.value, STM_JMPBUF_OFS) # LEA ebx, [esp+bufofs]
+ mc.MOV(rjh, r11) # MOV [rjthread.head], r11
+ mc.CMP(ebx, rjmovd_o_b) # CMP ebx, [rjth.movd_o_b]
mc.J_il8(rx86.Conditions['NE'], 0) # JNE label_below
jne_location = mc.get_relative_pos()
#
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -17,12 +17,10 @@
adr_nursery_top = CFlexSymbolic('((long)&STM_SEGMENT->nursery_end)')
adr_pypy_stm_nursery_low_fill_mark = (
CFlexSymbolic('((long)&pypy_stm_nursery_low_fill_mark)'))
-adr_rjthread = (
- CFlexSymbolic('((long)&stm_thread_local.rjthread'))
adr_rjthread_head = (
- CFlexSymbolic('((long)&stm_thread_local.rjthread.head'))
+ CFlexSymbolic('((long)&stm_thread_local.rjthread.head)'))
adr_rjthread_moved_off_base = (
- CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base'))
+ CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base)'))
adr_transaction_read_version = (
CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)'))
adr_segment_base = (
@@ -74,7 +72,7 @@
@dont_look_inside
def break_transaction():
- llop.stm_break_transaction(lltype.Void)
+ llop.stm_transaction_break(lltype.Void)
@dont_look_inside
def set_transaction_length(fraction):
diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py
--- a/rpython/translator/backendopt/gilanalysis.py
+++ b/rpython/translator/backendopt/gilanalysis.py
@@ -24,7 +24,7 @@
return False
def analyze_simple_operation(self, op, graphinfo):
- if op.opname == 'stm_break_transaction':
+ if op.opname == 'stm_transaction_break':
return True
return False
diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h
--- a/rpython/translator/stm/src_stm/extracode.h
+++ b/rpython/translator/stm/src_stm/extracode.h
@@ -77,16 +77,6 @@
long fnlen = 1, nlen = 1, line = 0;
char *fn = "?", *name = "?";
-#ifdef RPY_STM_JIT
- if (odd_number == STM_STACK_MARKER_NEW ||
- odd_number == STM_STACK_MARKER_OLD) {
- assert(o);
- /* XXX ji_jf_forward */
- /* XXX */
- o = NULL;
- }
-#endif
-
if (o) {
co_filename =_fetch_rpsspace0(segment_base, o, g_co_filename_ofs);
co_name =_fetch_rpsspace0(segment_base, o, g_co_name_ofs);
From noreply at buildbot.pypy.org Mon Aug 18 13:41:23 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Mon, 18 Aug 2014 13:41:23 +0200 (CEST)
Subject: [pypy-commit] stmgc default: fix release build of random2
Message-ID: <20140818114123.73E051C059C@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r1317:b62545917bee
Date: 2014-08-18 13:41 +0200
http://bitbucket.org/pypy/stmgc/changeset/b62545917bee/
Log: fix release build of random2
diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c
--- a/c7/demo/demo_random2.c
+++ b/c7/demo/demo_random2.c
@@ -110,7 +110,7 @@
num = get_rand(ss_size);
/* XXX: impl detail: there is already a "-1" on the SS -> +1 */
objptr_t r = (objptr_t)stm_thread_local.shadowstack_base[num+1].ss;
- assert((((uintptr_t)r) & 3) == 0);
+ OPT_ASSERT((((uintptr_t)r) & 3) == 0);
}
if (num == 1 && td.active_roots_num > 0) {
@@ -380,7 +380,7 @@
}
}
}
- assert(roots_on_ss == td.roots_on_ss);
+ OPT_ASSERT(roots_on_ss == td.roots_on_ss);
stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
}
From noreply at buildbot.pypy.org Mon Aug 18 13:58:39 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Mon, 18 Aug 2014 13:58:39 +0200 (CEST)
Subject: [pypy-commit] stmgc default: add all demos to tests
Message-ID: <20140818115839.F42101C347F@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r1318:ec5c149ff346
Date: 2014-08-18 13:58 +0200
http://bitbucket.org/pypy/stmgc/changeset/ec5c149ff346/
Log: add all demos to tests
diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c
--- a/c7/demo/demo_simple.c
+++ b/c7/demo/demo_simple.c
@@ -10,7 +10,7 @@
# include "stmgc.h"
#endif
-#define ITERS 1000000
+#define ITERS 100000
#define NTHREADS 2
@@ -59,14 +59,16 @@
void *demo2(void *arg)
{
int status;
+ rewind_jmp_buf rjbuf;
stm_register_thread_local(&stm_thread_local);
+ stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
char *org = (char *)stm_thread_local.shadowstack;
tl_counter = 0;
object_t *tmp;
int i = 0;
while (i < ITERS) {
- stm_start_inevitable_transaction(&stm_thread_local);
+ stm_start_transaction(&stm_thread_local);
tl_counter++;
if (i % 500 < 250)
STM_PUSH_ROOT(stm_thread_local, stm_allocate(16));//gl_counter++;
@@ -76,8 +78,9 @@
i++;
}
- assert(org == (char *)stm_thread_local.shadowstack);
+ OPT_ASSERT(org == (char *)stm_thread_local.shadowstack);
+ stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
stm_unregister_thread_local(&stm_thread_local);
status = sem_post(&done); assert(status == 0);
return NULL;
diff --git a/c7/test/test_demo.py b/c7/test/test_demo.py
--- a/c7/test/test_demo.py
+++ b/c7/test/test_demo.py
@@ -15,6 +15,19 @@
def test_shadowstack(self): self.make_and_run("debug-test_shadowstack")
- def test_demo2_debug(self): self.make_and_run("debug-demo2")
+ def test_demo_simple_build(self): self.make_and_run("build-demo_simple")
+ def test_demo_largemalloc_build(self): self.make_and_run("build-demo_largemalloc")
+
+
+
+ # def test_demo2_debug(self): self.make_and_run("debug-demo2")
def test_demo2_build(self): self.make_and_run("build-demo2")
def test_demo2_release(self): self.make_and_run("release-demo2")
+
+ # def test_demo_random_debug(self): self.make_and_run("debug-demo_random")
+ def test_demo_random_build(self): self.make_and_run("build-demo_random")
+ def test_demo_random_release(self): self.make_and_run("release-demo_random")
+
+ # def test_demo_random2_debug(self): self.make_and_run("debug-demo_random2")
+ def test_demo_random2_build(self): self.make_and_run("build-demo_random2")
+ def test_demo_random2_release(self): self.make_and_run("release-demo_random2")
From noreply at buildbot.pypy.org Mon Aug 18 14:01:56 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 14:01:56 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix
Message-ID: <20140818120156.1FBC11C347F@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72875:2eafe08987ef
Date: 2014-08-18 13:52 +0200
http://bitbucket.org/pypy/pypy/changeset/2eafe08987ef/
Log: Fix
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -904,6 +904,7 @@
mc.MOV_rs(ebx.value, STM_SHADOWSTACK_BASE_OFS)
# MOV ebx, [esp+ssbase]
mc.MOV_rs(r11.value, STM_PREV_OFS) # MOV r11, [esp+prev]
+ mc.SUB_ri(ebx.value, 1) # SUB ebx, 1
mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx
mc.LEA_rs(ebx.value, STM_JMPBUF_OFS) # LEA ebx, [esp+bufofs]
mc.MOV(rjh, r11) # MOV [rjthread.head], r11
From noreply at buildbot.pypy.org Mon Aug 18 14:26:22 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 14:26:22 +0200 (CEST)
Subject: [pypy-commit] pypy default: Fix for "assert isinstance(x,
str)" in RPython, in case x is "str-or-None".
Message-ID: <20140818122622.712401C0EF5@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72876:a24d530761ce
Date: 2014-08-18 14:25 +0200
http://bitbucket.org/pypy/pypy/changeset/a24d530761ce/
Log: Fix for "assert isinstance(x, str)" in RPython, in case x is "str-
or-None".
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4301,6 +4301,38 @@
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeString)
+ def test_isinstance_str_1(self):
+ def g():
+ pass
+ def f(n):
+ if n > 5:
+ s = "foo"
+ else:
+ s = None
+ g()
+ return isinstance(s, str)
+ a = self.RPythonAnnotator()
+ s = a.build_types(f, [int])
+ assert isinstance(s, annmodel.SomeBool)
+ assert not s.is_constant()
+
+ def test_isinstance_str_2(self):
+ def g():
+ pass
+ def f(n):
+ if n > 5:
+ s = "foo"
+ else:
+ s = None
+ g()
+ if isinstance(s, str):
+ return s
+ return ""
+ a = self.RPythonAnnotator()
+ s = a.build_types(f, [int])
+ assert isinstance(s, annmodel.SomeString)
+ assert not s.can_be_none()
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py
--- a/rpython/rtyper/rbuiltin.py
+++ b/rpython/rtyper/rbuiltin.py
@@ -683,13 +683,14 @@
if hop.s_result.is_constant():
return hop.inputconst(lltype.Bool, hop.s_result.const)
- if hop.args_s[1].is_constant() and hop.args_s[1].const == list:
- if hop.args_s[0].knowntype != list:
- raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None")
- rlist = hop.args_r[0]
- vlist = hop.inputarg(rlist, arg=0)
- cnone = hop.inputconst(rlist, None)
- return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool)
+ if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list):
+ if hop.args_s[0].knowntype not in (str, list):
+ raise TyperError("isinstance(x, str/list) expects x to be known"
+ " statically to be a str/list or None")
+ rstrlist = hop.args_r[0]
+ vstrlist = hop.inputarg(rstrlist, arg=0)
+ cnone = hop.inputconst(rstrlist, None)
+ return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool)
assert isinstance(hop.args_r[0], rclass.InstanceRepr)
return hop.args_r[0].rtype_isinstance(hop)
diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py
--- a/rpython/rtyper/test/test_rbuiltin.py
+++ b/rpython/rtyper/test/test_rbuiltin.py
@@ -364,17 +364,35 @@
assert res == isinstance([A(), B(), C()][x-1], [A, B, C][y-1]) * 3
def test_isinstance_list(self):
+ def g():
+ pass
def f(i):
if i == 0:
l = []
else:
l = None
+ g()
return isinstance(l, list)
res = self.interpret(f, [0])
assert res is True
res = self.interpret(f, [1])
assert res is False
+ def test_isinstance_str(self):
+ def g():
+ pass
+ def f(i):
+ if i == 0:
+ l = "foobar"
+ else:
+ l = None
+ g()
+ return isinstance(l, str)
+ res = self.interpret(f, [0])
+ assert res is True
+ res = self.interpret(f, [1])
+ assert res is False
+
def test_instantiate(self):
class A:
pass
From noreply at buildbot.pypy.org Mon Aug 18 14:27:50 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 14:27:50 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix
Message-ID: <20140818122750.568081C0EF5@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72877:e7b26f5a2320
Date: 2014-08-18 14:27 +0200
http://bitbucket.org/pypy/pypy/changeset/e7b26f5a2320/
Log: Fix
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -44,8 +44,7 @@
pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location,
should_unroll_one_iteration =
should_unroll_one_iteration,
- name='pypyjit',
- stm_do_transaction_breaks=True)
+ name='pypyjit')
class __extend__(PyFrame):
From noreply at buildbot.pypy.org Mon Aug 18 16:05:34 2014
From: noreply at buildbot.pypy.org (amauryfa)
Date: Mon, 18 Aug 2014 16:05:34 +0200 (CEST)
Subject: [pypy-commit] pypy default: hg merge default
Message-ID: <20140818140534.A38811D22EF@cobra.cs.uni-duesseldorf.de>
Author: Amaury Forgeot d'Arc
Branch:
Changeset: r72878:77c1babd513e
Date: 2014-08-18 00:53 +0200
http://bitbucket.org/pypy/pypy/changeset/77c1babd513e/
Log: hg merge default
diff too long, truncating to 2000 out of 10260 lines
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -1,5 +1,4 @@
# Generated by tools/asdl_py.py
-from rpython.rlib.unroll import unrolling_iterable
from rpython.tool.pairtype import extendabletype
from rpython.tool.sourcetools import func_with_new_name
@@ -11,7 +10,7 @@
def raise_attriberr(space, w_obj, name):
raise oefmt(space.w_AttributeError,
- "'%T' object has no attribute '%s'", w_obj, name)
+ \"'%T' object has no attribute '%s'\", w_obj, name)
def check_string(space, w_obj):
@@ -21,11 +20,15 @@
'AST string must be of type str or unicode'))
return w_obj
-
-class AST(W_Root):
-
- w_dict = None
-
+def get_field(space, w_node, name, optional):
+ w_obj = w_node.getdictvalue(space, name)
+ if w_obj is None and not optional:
+ raise oefmt(space.w_TypeError,
+ "required field \"%s\" missing from %T", name, w_node)
+ return w_obj
+
+
+class AST(object):
__metaclass__ = extendabletype
def walkabout(self, visitor):
@@ -34,8 +37,23 @@
def mutate_over(self, visitor):
raise AssertionError("mutate_over() implementation not provided")
- def sync_app_attrs(self, space):
- raise NotImplementedError
+
+class NodeVisitorNotImplemented(Exception):
+ pass
+
+
+class _FieldsWrapper(W_Root):
+ "Hack around the fact we can't store tuples on a TypeDef."
+
+ def __init__(self, fields):
+ self.fields = fields
+
+ def __spacebind__(self, space):
+ return space.newtuple([space.wrap(field) for field in self.fields])
+
+
+class W_AST(W_Root):
+ w_dict = None
def getdict(self, space):
if self.w_dict is None:
@@ -47,7 +65,7 @@
if w_dict is None:
w_dict = space.newdict()
w_type = space.type(self)
- w_fields = w_type.getdictvalue(space, "_fields")
+ w_fields = space.getattr(w_type, space.wrap("_fields"))
for w_name in space.fixedview(w_fields):
try:
space.setitem(w_dict, w_name,
@@ -71,79 +89,94 @@
space.setattr(self, w_name,
space.getitem(w_state, w_name))
- def missing_field(self, space, required, host):
- "Find which required field is missing."
- state = self.initialization_state
- for i in range(len(required)):
- if (state >> i) & 1:
- continue # field is present
- missing = required[i]
- if missing is None:
- continue # field is optional
- w_obj = self.getdictvalue(space, missing)
- if w_obj is None:
- raise oefmt(space.w_TypeError,
- "required field \"%s\" missing from %s",
- missing, host)
- else:
- raise oefmt(space.w_TypeError,
- "incorrect type for field \"%s\" in %s",
- missing, host)
- raise AssertionError("should not reach here")
-
-
-class NodeVisitorNotImplemented(Exception):
- pass
-
-
-class _FieldsWrapper(W_Root):
- "Hack around the fact we can't store tuples on a TypeDef."
-
- def __init__(self, fields):
- self.fields = fields
-
- def __spacebind__(self, space):
- return space.newtuple([space.wrap(field) for field in self.fields])
-
-
-def get_AST_new(node_class):
- def generic_AST_new(space, w_type, __args__):
- node = space.allocate_instance(node_class, w_type)
- node.initialization_state = 0
- return space.wrap(node)
- return func_with_new_name(generic_AST_new, "new_%s" % node_class.__name__)
-
-def AST_init(space, w_self, __args__):
+def W_AST_new(space, w_type, __args__):
+ node = space.allocate_instance(W_AST, w_type)
+ return space.wrap(node)
+
+def W_AST_init(space, w_self, __args__):
args_w, kwargs_w = __args__.unpack()
- if args_w and len(args_w) != 0:
- w_err = space.wrap("_ast.AST constructor takes 0 positional arguments")
- raise OperationError(space.w_TypeError, w_err)
+ fields_w = space.fixedview(space.getattr(space.type(w_self),
+ space.wrap("_fields")))
+ num_fields = len(fields_w) if fields_w else 0
+ if args_w and len(args_w) != num_fields:
+ if num_fields == 0:
+ raise oefmt(space.w_TypeError,
+ "%T constructor takes 0 positional arguments", w_self)
+ elif num_fields == 1:
+ raise oefmt(space.w_TypeError,
+ "%T constructor takes either 0 or %d positional argument", w_self, num_fields)
+ else:
+ raise oefmt(space.w_TypeError,
+ "%T constructor takes either 0 or %d positional arguments", w_self, num_fields)
+ if args_w:
+ for i, w_field in enumerate(fields_w):
+ space.setattr(w_self, w_field, args_w[i])
for field, w_value in kwargs_w.iteritems():
space.setattr(w_self, space.wrap(field), w_value)
-AST.typedef = typedef.TypeDef("_ast.AST",
+
+W_AST.typedef = typedef.TypeDef("_ast.AST",
_fields=_FieldsWrapper([]),
_attributes=_FieldsWrapper([]),
- __reduce__=interp2app(AST.reduce_w),
- __setstate__=interp2app(AST.setstate_w),
+ __reduce__=interp2app(W_AST.reduce_w),
+ __setstate__=interp2app(W_AST.setstate_w),
__dict__ = typedef.GetSetProperty(typedef.descr_get_dict,
- typedef.descr_set_dict, cls=AST),
- __new__=interp2app(get_AST_new(AST)),
- __init__=interp2app(AST_init),
+ typedef.descr_set_dict, cls=W_AST),
+ __new__=interp2app(W_AST_new),
+ __init__=interp2app(W_AST_init),
)
-
-
+class State:
+ AST_TYPES = []
+
+ @classmethod
+ def ast_type(cls, name, base, fields, attributes=None):
+ cls.AST_TYPES.append((name, base, fields, attributes))
+
+ def __init__(self, space):
+ self.w_AST = space.gettypeobject(W_AST.typedef)
+ for (name, base, fields, attributes) in self.AST_TYPES:
+ self.make_new_type(space, name, base, fields, attributes)
+
+ def make_new_type(self, space, name, base, fields, attributes):
+ w_base = getattr(self, 'w_%s' % base)
+ w_dict = space.newdict()
+ space.setitem_str(w_dict, '__module__', space.wrap('_ast'))
+ if fields is not None:
+ space.setitem_str(w_dict, "_fields",
+ space.newtuple([space.wrap(f) for f in fields]))
+ if attributes is not None:
+ space.setitem_str(w_dict, "_attributes",
+ space.newtuple([space.wrap(a) for a in attributes]))
+ w_type = space.call_function(
+ space.w_type,
+ space.wrap(name), space.newtuple([w_base]), w_dict)
+ setattr(self, 'w_%s' % name, w_type)
+
+def get(space):
+ return space.fromcache(State)
class mod(AST):
- pass
+ @staticmethod
+ def from_object(space, w_node):
+ if space.is_w(w_node, space.w_None):
+ return None
+ if space.isinstance_w(w_node, get(space).w_Module):
+ return Module.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Interactive):
+ return Interactive.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Expression):
+ return Expression.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Suite):
+ return Suite.from_object(space, w_node)
+ raise oefmt(space.w_TypeError,
+ "Expected mod node, got %T", w_node)
+State.ast_type('mod', 'AST', None, [])
class Module(mod):
def __init__(self, body):
self.body = body
- self.w_body = None
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Module(self)
@@ -153,29 +186,30 @@
visitor._mutate_sequence(self.body)
return visitor.visit_Module(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Module')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Module)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ return Module(_body)
+
+State.ast_type('Module', 'mod', ['body'])
class Interactive(mod):
def __init__(self, body):
self.body = body
- self.w_body = None
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Interactive(self)
@@ -185,28 +219,30 @@
visitor._mutate_sequence(self.body)
return visitor.visit_Interactive(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Interactive')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Interactive)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ return Interactive(_body)
+
+State.ast_type('Interactive', 'mod', ['body'])
class Expression(mod):
def __init__(self, body):
self.body = body
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Expression(self)
@@ -215,20 +251,25 @@
self.body = self.body.mutate_over(visitor)
return visitor.visit_Expression(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Expression')
- else:
- pass
- self.body.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Expression)
+ w_body = self.body.to_object(space) # expr
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ _body = expr.from_object(space, w_body)
+ return Expression(_body)
+
+State.ast_type('Expression', 'mod', ['body'])
class Suite(mod):
def __init__(self, body):
self.body = body
- self.w_body = None
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Suite(self)
@@ -238,21 +279,24 @@
visitor._mutate_sequence(self.body)
return visitor.visit_Suite(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Suite')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Suite)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ return Suite(_body)
+
+State.ast_type('Suite', 'mod', ['body'])
class stmt(AST):
@@ -261,17 +305,68 @@
self.lineno = lineno
self.col_offset = col_offset
+ @staticmethod
+ def from_object(space, w_node):
+ if space.is_w(w_node, space.w_None):
+ return None
+ if space.isinstance_w(w_node, get(space).w_FunctionDef):
+ return FunctionDef.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_ClassDef):
+ return ClassDef.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Return):
+ return Return.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Delete):
+ return Delete.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Assign):
+ return Assign.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_AugAssign):
+ return AugAssign.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Print):
+ return Print.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_For):
+ return For.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_While):
+ return While.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_If):
+ return If.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_With):
+ return With.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Raise):
+ return Raise.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_TryExcept):
+ return TryExcept.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_TryFinally):
+ return TryFinally.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Assert):
+ return Assert.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Import):
+ return Import.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_ImportFrom):
+ return ImportFrom.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Exec):
+ return Exec.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Global):
+ return Global.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Expr):
+ return Expr.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Pass):
+ return Pass.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Break):
+ return Break.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Continue):
+ return Continue.from_object(space, w_node)
+ raise oefmt(space.w_TypeError,
+ "Expected stmt node, got %T", w_node)
+State.ast_type('stmt', 'AST', None, ['lineno', 'col_offset'])
+
class FunctionDef(stmt):
def __init__(self, name, args, body, decorator_list, lineno, col_offset):
self.name = name
self.args = args
self.body = body
- self.w_body = None
self.decorator_list = decorator_list
- self.w_decorator_list = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 63
def walkabout(self, visitor):
visitor.visit_FunctionDef(self)
@@ -284,32 +379,49 @@
visitor._mutate_sequence(self.decorator_list)
return visitor.visit_FunctionDef(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 63:
- self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_FunctionDef)
+ w_name = space.wrap(self.name) # identifier
+ space.setattr(w_node, space.wrap('name'), w_name)
+ w_args = self.args.to_object(space) # arguments
+ space.setattr(w_node, space.wrap('args'), w_args)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.args.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_decorator_list
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.decorator_list = None
- if self.decorator_list is not None:
- for node in self.decorator_list:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.decorator_list is None:
+ decorator_list_w = []
+ else:
+ decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr
+ w_decorator_list = space.newlist(decorator_list_w)
+ space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_name = get_field(space, w_node, 'name', False)
+ w_args = get_field(space, w_node, 'args', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_decorator_list = get_field(space, w_node, 'decorator_list', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _name = space.realstr_w(w_name)
+ _args = arguments.from_object(space, w_args)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ decorator_list_w = space.unpackiterable(w_decorator_list)
+ _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return FunctionDef(_name, _args, _body, _decorator_list, _lineno, _col_offset)
+
+State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list'])
class ClassDef(stmt):
@@ -317,13 +429,9 @@
def __init__(self, name, bases, body, decorator_list, lineno, col_offset):
self.name = name
self.bases = bases
- self.w_bases = None
self.body = body
- self.w_body = None
self.decorator_list = decorator_list
- self.w_decorator_list = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 63
def walkabout(self, visitor):
visitor.visit_ClassDef(self)
@@ -337,41 +445,54 @@
visitor._mutate_sequence(self.decorator_list)
return visitor.visit_ClassDef(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 63:
- self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_ClassDef)
+ w_name = space.wrap(self.name) # identifier
+ space.setattr(w_node, space.wrap('name'), w_name)
+ if self.bases is None:
+ bases_w = []
else:
- pass
- w_list = self.w_bases
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.bases = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.bases = None
- if self.bases is not None:
- for node in self.bases:
- node.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_decorator_list
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.decorator_list = None
- if self.decorator_list is not None:
- for node in self.decorator_list:
- node.sync_app_attrs(space)
+ bases_w = [node.to_object(space) for node in self.bases] # expr
+ w_bases = space.newlist(bases_w)
+ space.setattr(w_node, space.wrap('bases'), w_bases)
+ if self.body is None:
+ body_w = []
+ else:
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.decorator_list is None:
+ decorator_list_w = []
+ else:
+ decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr
+ w_decorator_list = space.newlist(decorator_list_w)
+ space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_name = get_field(space, w_node, 'name', False)
+ w_bases = get_field(space, w_node, 'bases', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_decorator_list = get_field(space, w_node, 'decorator_list', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _name = space.realstr_w(w_name)
+ bases_w = space.unpackiterable(w_bases)
+ _bases = [expr.from_object(space, w_item) for w_item in bases_w]
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ decorator_list_w = space.unpackiterable(w_decorator_list)
+ _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return ClassDef(_name, _bases, _body, _decorator_list, _lineno, _col_offset)
+
+State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'body', 'decorator_list'])
class Return(stmt):
@@ -379,7 +500,6 @@
def __init__(self, value, lineno, col_offset):
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Return(self)
@@ -389,23 +509,34 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_Return(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~4) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset', None], 'Return')
- else:
- if not self.initialization_state & 4:
- self.value = None
- if self.value:
- self.value.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Return)
+ w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_value = get_field(space, w_node, 'value', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Return(_value, _lineno, _col_offset)
+
+State.ast_type('Return', 'stmt', ['value'])
class Delete(stmt):
def __init__(self, targets, lineno, col_offset):
self.targets = targets
- self.w_targets = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Delete(self)
@@ -415,31 +546,40 @@
visitor._mutate_sequence(self.targets)
return visitor.visit_Delete(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Delete)
+ if self.targets is None:
+ targets_w = []
else:
- pass
- w_list = self.w_targets
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.targets = None
- if self.targets is not None:
- for node in self.targets:
- node.sync_app_attrs(space)
+ targets_w = [node.to_object(space) for node in self.targets] # expr
+ w_targets = space.newlist(targets_w)
+ space.setattr(w_node, space.wrap('targets'), w_targets)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_targets = get_field(space, w_node, 'targets', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ targets_w = space.unpackiterable(w_targets)
+ _targets = [expr.from_object(space, w_item) for w_item in targets_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Delete(_targets, _lineno, _col_offset)
+
+State.ast_type('Delete', 'stmt', ['targets'])
class Assign(stmt):
def __init__(self, targets, value, lineno, col_offset):
self.targets = targets
- self.w_targets = None
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_Assign(self)
@@ -450,22 +590,36 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_Assign(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 15:
- self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Assign)
+ if self.targets is None:
+ targets_w = []
else:
- pass
- w_list = self.w_targets
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.targets = None
- if self.targets is not None:
- for node in self.targets:
- node.sync_app_attrs(space)
- self.value.sync_app_attrs(space)
+ targets_w = [node.to_object(space) for node in self.targets] # expr
+ w_targets = space.newlist(targets_w)
+ space.setattr(w_node, space.wrap('targets'), w_targets)
+ w_value = self.value.to_object(space) # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_targets = get_field(space, w_node, 'targets', False)
+ w_value = get_field(space, w_node, 'value', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ targets_w = space.unpackiterable(w_targets)
+ _targets = [expr.from_object(space, w_item) for w_item in targets_w]
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Assign(_targets, _value, _lineno, _col_offset)
+
+State.ast_type('Assign', 'stmt', ['targets', 'value'])
class AugAssign(stmt):
@@ -475,7 +629,6 @@
self.op = op
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_AugAssign(self)
@@ -485,13 +638,35 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_AugAssign(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign')
- else:
- pass
- self.target.sync_app_attrs(space)
- self.value.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_AugAssign)
+ w_target = self.target.to_object(space) # expr
+ space.setattr(w_node, space.wrap('target'), w_target)
+ w_op = operator_to_class[self.op - 1]().to_object(space) # operator
+ space.setattr(w_node, space.wrap('op'), w_op)
+ w_value = self.value.to_object(space) # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_target = get_field(space, w_node, 'target', False)
+ w_op = get_field(space, w_node, 'op', False)
+ w_value = get_field(space, w_node, 'value', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _target = expr.from_object(space, w_target)
+ _op = operator.from_object(space, w_op)
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return AugAssign(_target, _op, _value, _lineno, _col_offset)
+
+State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value'])
class Print(stmt):
@@ -499,10 +674,8 @@
def __init__(self, dest, values, nl, lineno, col_offset):
self.dest = dest
self.values = values
- self.w_values = None
self.nl = nl
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_Print(self)
@@ -514,24 +687,40 @@
visitor._mutate_sequence(self.values)
return visitor.visit_Print(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~4) ^ 27:
- self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Print)
+ w_dest = self.dest.to_object(space) if self.dest is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('dest'), w_dest)
+ if self.values is None:
+ values_w = []
else:
- if not self.initialization_state & 4:
- self.dest = None
- if self.dest:
- self.dest.sync_app_attrs(space)
- w_list = self.w_values
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.values = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.values = None
- if self.values is not None:
- for node in self.values:
- node.sync_app_attrs(space)
+ values_w = [node.to_object(space) for node in self.values] # expr
+ w_values = space.newlist(values_w)
+ space.setattr(w_node, space.wrap('values'), w_values)
+ w_nl = space.wrap(self.nl) # bool
+ space.setattr(w_node, space.wrap('nl'), w_nl)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_dest = get_field(space, w_node, 'dest', True)
+ w_values = get_field(space, w_node, 'values', False)
+ w_nl = get_field(space, w_node, 'nl', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _dest = expr.from_object(space, w_dest)
+ values_w = space.unpackiterable(w_values)
+ _values = [expr.from_object(space, w_item) for w_item in values_w]
+ _nl = space.bool_w(w_nl)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Print(_dest, _values, _nl, _lineno, _col_offset)
+
+State.ast_type('Print', 'stmt', ['dest', 'values', 'nl'])
class For(stmt):
@@ -540,11 +729,8 @@
self.target = target
self.iter = iter
self.body = body
- self.w_body = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 63
def walkabout(self, visitor):
visitor.visit_For(self)
@@ -558,33 +744,49 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_For(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 63:
- self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_For)
+ w_target = self.target.to_object(space) # expr
+ space.setattr(w_node, space.wrap('target'), w_target)
+ w_iter = self.iter.to_object(space) # expr
+ space.setattr(w_node, space.wrap('iter'), w_iter)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.target.sync_app_attrs(space)
- self.iter.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_target = get_field(space, w_node, 'target', False)
+ w_iter = get_field(space, w_node, 'iter', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _target = expr.from_object(space, w_target)
+ _iter = expr.from_object(space, w_iter)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return For(_target, _iter, _body, _orelse, _lineno, _col_offset)
+
+State.ast_type('For', 'stmt', ['target', 'iter', 'body', 'orelse'])
class While(stmt):
@@ -592,11 +794,8 @@
def __init__(self, test, body, orelse, lineno, col_offset):
self.test = test
self.body = body
- self.w_body = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_While(self)
@@ -609,32 +808,45 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_While(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_While)
+ w_test = self.test.to_object(space) # expr
+ space.setattr(w_node, space.wrap('test'), w_test)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.test.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_test = get_field(space, w_node, 'test', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _test = expr.from_object(space, w_test)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return While(_test, _body, _orelse, _lineno, _col_offset)
+
+State.ast_type('While', 'stmt', ['test', 'body', 'orelse'])
class If(stmt):
@@ -642,11 +854,8 @@
def __init__(self, test, body, orelse, lineno, col_offset):
self.test = test
self.body = body
- self.w_body = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_If(self)
@@ -659,32 +868,45 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_If(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_If)
+ w_test = self.test.to_object(space) # expr
+ space.setattr(w_node, space.wrap('test'), w_test)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.test.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_test = get_field(space, w_node, 'test', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _test = expr.from_object(space, w_test)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return If(_test, _body, _orelse, _lineno, _col_offset)
+
+State.ast_type('If', 'stmt', ['test', 'body', 'orelse'])
class With(stmt):
@@ -693,9 +915,7 @@
self.context_expr = context_expr
self.optional_vars = optional_vars
self.body = body
- self.w_body = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_With(self)
@@ -708,25 +928,40 @@
visitor._mutate_sequence(self.body)
return visitor.visit_With(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~8) ^ 23:
- self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_With)
+ w_context_expr = self.context_expr.to_object(space) # expr
+ space.setattr(w_node, space.wrap('context_expr'), w_context_expr)
+ w_optional_vars = self.optional_vars.to_object(space) if self.optional_vars is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('optional_vars'), w_optional_vars)
+ if self.body is None:
+ body_w = []
else:
- if not self.initialization_state & 8:
- self.optional_vars = None
- self.context_expr.sync_app_attrs(space)
- if self.optional_vars:
- self.optional_vars.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_context_expr = get_field(space, w_node, 'context_expr', False)
+ w_optional_vars = get_field(space, w_node, 'optional_vars', True)
+ w_body = get_field(space, w_node, 'body', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _context_expr = expr.from_object(space, w_context_expr)
+ _optional_vars = expr.from_object(space, w_optional_vars)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return With(_context_expr, _optional_vars, _body, _lineno, _col_offset)
+
+State.ast_type('With', 'stmt', ['context_expr', 'optional_vars', 'body'])
class Raise(stmt):
@@ -736,7 +971,6 @@
self.inst = inst
self.tback = tback
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_Raise(self)
@@ -750,35 +984,44 @@
self.tback = self.tback.mutate_over(visitor)
return visitor.visit_Raise(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~28) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise')
- else:
- if not self.initialization_state & 4:
- self.type = None
- if not self.initialization_state & 8:
- self.inst = None
- if not self.initialization_state & 16:
- self.tback = None
- if self.type:
- self.type.sync_app_attrs(space)
- if self.inst:
- self.inst.sync_app_attrs(space)
- if self.tback:
- self.tback.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Raise)
+ w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('type'), w_type)
+ w_inst = self.inst.to_object(space) if self.inst is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('inst'), w_inst)
+ w_tback = self.tback.to_object(space) if self.tback is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('tback'), w_tback)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_type = get_field(space, w_node, 'type', True)
+ w_inst = get_field(space, w_node, 'inst', True)
+ w_tback = get_field(space, w_node, 'tback', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _type = expr.from_object(space, w_type)
+ _inst = expr.from_object(space, w_inst)
+ _tback = expr.from_object(space, w_tback)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Raise(_type, _inst, _tback, _lineno, _col_offset)
+
+State.ast_type('Raise', 'stmt', ['type', 'inst', 'tback'])
class TryExcept(stmt):
def __init__(self, body, handlers, orelse, lineno, col_offset):
self.body = body
- self.w_body = None
self.handlers = handlers
- self.w_handlers = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_TryExcept(self)
@@ -792,52 +1035,58 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_TryExcept(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_TryExcept)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_handlers
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.handlers = [space.interp_w(excepthandler, w_obj) for w_obj in list_w]
- else:
- self.handlers = None
- if self.handlers is not None:
- for node in self.handlers:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.handlers is None:
+ handlers_w = []
+ else:
+ handlers_w = [node.to_object(space) for node in self.handlers] # excepthandler
+ w_handlers = space.newlist(handlers_w)
+ space.setattr(w_node, space.wrap('handlers'), w_handlers)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ w_handlers = get_field(space, w_node, 'handlers', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ handlers_w = space.unpackiterable(w_handlers)
+ _handlers = [excepthandler.from_object(space, w_item) for w_item in handlers_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset)
+
+State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse'])
class TryFinally(stmt):
def __init__(self, body, finalbody, lineno, col_offset):
self.body = body
- self.w_body = None
self.finalbody = finalbody
- self.w_finalbody = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_TryFinally(self)
@@ -849,31 +1098,41 @@
visitor._mutate_sequence(self.finalbody)
return visitor.visit_TryFinally(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 15:
- self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_TryFinally)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_finalbody
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.finalbody = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.finalbody = None
- if self.finalbody is not None:
- for node in self.finalbody:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.finalbody is None:
+ finalbody_w = []
+ else:
+ finalbody_w = [node.to_object(space) for node in self.finalbody] # stmt
+ w_finalbody = space.newlist(finalbody_w)
+ space.setattr(w_node, space.wrap('finalbody'), w_finalbody)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ w_finalbody = get_field(space, w_node, 'finalbody', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ finalbody_w = space.unpackiterable(w_finalbody)
+ _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return TryFinally(_body, _finalbody, _lineno, _col_offset)
+
+State.ast_type('TryFinally', 'stmt', ['body', 'finalbody'])
class Assert(stmt):
@@ -882,7 +1141,6 @@
self.test = test
self.msg = msg
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_Assert(self)
@@ -893,24 +1151,38 @@
self.msg = self.msg.mutate_over(visitor)
return visitor.visit_Assert(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~8) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert')
- else:
- if not self.initialization_state & 8:
- self.msg = None
- self.test.sync_app_attrs(space)
- if self.msg:
- self.msg.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Assert)
+ w_test = self.test.to_object(space) # expr
+ space.setattr(w_node, space.wrap('test'), w_test)
+ w_msg = self.msg.to_object(space) if self.msg is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('msg'), w_msg)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_test = get_field(space, w_node, 'test', False)
+ w_msg = get_field(space, w_node, 'msg', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _test = expr.from_object(space, w_test)
+ _msg = expr.from_object(space, w_msg)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Assert(_test, _msg, _lineno, _col_offset)
+
+State.ast_type('Assert', 'stmt', ['test', 'msg'])
class Import(stmt):
def __init__(self, names, lineno, col_offset):
self.names = names
- self.w_names = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Import(self)
@@ -920,21 +1192,32 @@
visitor._mutate_sequence(self.names)
return visitor.visit_Import(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Import)
+ if self.names is None:
+ names_w = []
else:
- pass
- w_list = self.w_names
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.names = [space.interp_w(alias, w_obj) for w_obj in list_w]
- else:
- self.names = None
- if self.names is not None:
- for node in self.names:
- node.sync_app_attrs(space)
+ names_w = [node.to_object(space) for node in self.names] # alias
+ w_names = space.newlist(names_w)
+ space.setattr(w_node, space.wrap('names'), w_names)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_names = get_field(space, w_node, 'names', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ names_w = space.unpackiterable(w_names)
+ _names = [alias.from_object(space, w_item) for w_item in names_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Import(_names, _lineno, _col_offset)
+
+State.ast_type('Import', 'stmt', ['names'])
class ImportFrom(stmt):
@@ -942,10 +1225,8 @@
def __init__(self, module, names, level, lineno, col_offset):
self.module = module
self.names = names
- self.w_names = None
self.level = level
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_ImportFrom(self)
@@ -955,24 +1236,40 @@
visitor._mutate_sequence(self.names)
return visitor.visit_ImportFrom(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~20) ^ 11:
- self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_ImportFrom)
+ w_module = space.wrap(self.module) # identifier
+ space.setattr(w_node, space.wrap('module'), w_module)
+ if self.names is None:
+ names_w = []
else:
- if not self.initialization_state & 4:
- self.module = None
- if not self.initialization_state & 16:
- self.level = 0
- w_list = self.w_names
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.names = [space.interp_w(alias, w_obj) for w_obj in list_w]
- else:
- self.names = None
- if self.names is not None:
- for node in self.names:
- node.sync_app_attrs(space)
+ names_w = [node.to_object(space) for node in self.names] # alias
+ w_names = space.newlist(names_w)
+ space.setattr(w_node, space.wrap('names'), w_names)
+ w_level = space.wrap(self.level) # int
+ space.setattr(w_node, space.wrap('level'), w_level)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_module = get_field(space, w_node, 'module', True)
+ w_names = get_field(space, w_node, 'names', False)
+ w_level = get_field(space, w_node, 'level', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _module = space.str_or_None_w(w_module)
+ names_w = space.unpackiterable(w_names)
+ _names = [alias.from_object(space, w_item) for w_item in names_w]
+ _level = space.int_w(w_level)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return ImportFrom(_module, _names, _level, _lineno, _col_offset)
+
+State.ast_type('ImportFrom', 'stmt', ['module', 'names', 'level'])
class Exec(stmt):
@@ -982,7 +1279,6 @@
self.globals = globals
self.locals = locals
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_Exec(self)
@@ -995,28 +1291,42 @@
self.locals = self.locals.mutate_over(visitor)
return visitor.visit_Exec(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~24) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec')
- else:
- if not self.initialization_state & 8:
- self.globals = None
- if not self.initialization_state & 16:
- self.locals = None
- self.body.sync_app_attrs(space)
- if self.globals:
- self.globals.sync_app_attrs(space)
- if self.locals:
- self.locals.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Exec)
+ w_body = self.body.to_object(space) # expr
+ space.setattr(w_node, space.wrap('body'), w_body)
+ w_globals = self.globals.to_object(space) if self.globals is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('globals'), w_globals)
+ w_locals = self.locals.to_object(space) if self.locals is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('locals'), w_locals)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ w_globals = get_field(space, w_node, 'globals', True)
+ w_locals = get_field(space, w_node, 'locals', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _body = expr.from_object(space, w_body)
+ _globals = expr.from_object(space, w_globals)
+ _locals = expr.from_object(space, w_locals)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Exec(_body, _globals, _locals, _lineno, _col_offset)
+
+State.ast_type('Exec', 'stmt', ['body', 'globals', 'locals'])
class Global(stmt):
def __init__(self, names, lineno, col_offset):
self.names = names
- self.w_names = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Global(self)
@@ -1024,18 +1334,32 @@
def mutate_over(self, visitor):
return visitor.visit_Global(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Global)
+ if self.names is None:
+ names_w = []
else:
- pass
- w_list = self.w_names
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.names = [space.realstr_w(w_obj) for w_obj in list_w]
- else:
- self.names = None
+ names_w = [space.wrap(node) for node in self.names] # identifier
+ w_names = space.newlist(names_w)
+ space.setattr(w_node, space.wrap('names'), w_names)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_names = get_field(space, w_node, 'names', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ names_w = space.unpackiterable(w_names)
+ _names = [space.realstr_w(w_item) for w_item in names_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Global(_names, _lineno, _col_offset)
+
+State.ast_type('Global', 'stmt', ['names'])
class Expr(stmt):
@@ -1043,7 +1367,6 @@
def __init__(self, value, lineno, col_offset):
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Expr(self)
@@ -1052,19 +1375,33 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_Expr(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr')
- else:
- pass
- self.value.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Expr)
+ w_value = self.value.to_object(space) # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_value = get_field(space, w_node, 'value', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Expr(_value, _lineno, _col_offset)
+
+State.ast_type('Expr', 'stmt', ['value'])
class Pass(stmt):
def __init__(self, lineno, col_offset):
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 3
def walkabout(self, visitor):
visitor.visit_Pass(self)
@@ -1072,18 +1409,29 @@
def mutate_over(self, visitor):
return visitor.visit_Pass(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset'], 'Pass')
- else:
- pass
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Pass)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Pass(_lineno, _col_offset)
+
+State.ast_type('Pass', 'stmt', [])
class Break(stmt):
def __init__(self, lineno, col_offset):
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 3
def walkabout(self, visitor):
visitor.visit_Break(self)
@@ -1091,18 +1439,29 @@
def mutate_over(self, visitor):
return visitor.visit_Break(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset'], 'Break')
- else:
- pass
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Break)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Break(_lineno, _col_offset)
+
+State.ast_type('Break', 'stmt', [])
class Continue(stmt):
def __init__(self, lineno, col_offset):
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 3
def walkabout(self, visitor):
visitor.visit_Continue(self)
@@ -1110,11 +1469,23 @@
def mutate_over(self, visitor):
return visitor.visit_Continue(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset'], 'Continue')
- else:
- pass
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Continue)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Continue(_lineno, _col_offset)
+
+State.ast_type('Continue', 'stmt', [])
class expr(AST):
@@ -1123,14 +1494,66 @@
self.lineno = lineno
self.col_offset = col_offset
+ @staticmethod
+ def from_object(space, w_node):
+ if space.is_w(w_node, space.w_None):
+ return None
+ if space.isinstance_w(w_node, get(space).w_BoolOp):
+ return BoolOp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_BinOp):
+ return BinOp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_UnaryOp):
+ return UnaryOp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Lambda):
+ return Lambda.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_IfExp):
+ return IfExp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Dict):
+ return Dict.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Set):
+ return Set.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_ListComp):
+ return ListComp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_SetComp):
+ return SetComp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_DictComp):
+ return DictComp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_GeneratorExp):
+ return GeneratorExp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Yield):
+ return Yield.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Compare):
+ return Compare.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Call):
+ return Call.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Repr):
+ return Repr.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Num):
+ return Num.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Str):
+ return Str.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Attribute):
+ return Attribute.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Subscript):
+ return Subscript.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Name):
+ return Name.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_List):
+ return List.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Tuple):
+ return Tuple.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Const):
+ return Const.from_object(space, w_node)
+ raise oefmt(space.w_TypeError,
+ "Expected expr node, got %T", w_node)
+State.ast_type('expr', 'AST', None, ['lineno', 'col_offset'])
+
class BoolOp(expr):
def __init__(self, op, values, lineno, col_offset):
self.op = op
self.values = values
- self.w_values = None
expr.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_BoolOp(self)
@@ -1140,21 +1563,36 @@
visitor._mutate_sequence(self.values)
return visitor.visit_BoolOp(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 15:
- self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_BoolOp)
+ w_op = boolop_to_class[self.op - 1]().to_object(space) # boolop
+ space.setattr(w_node, space.wrap('op'), w_op)
+ if self.values is None:
+ values_w = []
else:
- pass
From noreply at buildbot.pypy.org Mon Aug 18 16:05:36 2014
From: noreply at buildbot.pypy.org (amauryfa)
Date: Mon, 18 Aug 2014 16:05:36 +0200 (CEST)
Subject: [pypy-commit] pypy default: Fix merge
Message-ID: <20140818140536.0ECF61D22EF@cobra.cs.uni-duesseldorf.de>
Author: Amaury Forgeot d'Arc
Branch:
Changeset: r72879:0e073fccc124
Date: 2014-08-18 00:55 +0200
http://bitbucket.org/pypy/pypy/changeset/0e073fccc124/
Log: Fix merge
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -10,7 +10,7 @@
def raise_attriberr(space, w_obj, name):
raise oefmt(space.w_AttributeError,
- \"'%T' object has no attribute '%s'\", w_obj, name)
+ "'%T' object has no attribute '%s'", w_obj, name)
def check_string(space, w_obj):
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -393,7 +393,7 @@
def raise_attriberr(space, w_obj, name):
raise oefmt(space.w_AttributeError,
- \"'%T' object has no attribute '%s'\", w_obj, name)
+ "'%T' object has no attribute '%s'", w_obj, name)
def check_string(space, w_obj):
From noreply at buildbot.pypy.org Mon Aug 18 16:05:37 2014
From: noreply at buildbot.pypy.org (amauryfa)
Date: Mon, 18 Aug 2014 16:05:37 +0200 (CEST)
Subject: [pypy-commit] pypy default: Fix after merge.
Message-ID: <20140818140537.337AC1D22EF@cobra.cs.uni-duesseldorf.de>
Author: Amaury Forgeot d'Arc
Branch:
Changeset: r72880:0da7325d54c6
Date: 2014-08-18 09:16 +0200
http://bitbucket.org/pypy/pypy/changeset/0da7325d54c6/
Log: Fix after merge.
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py
--- a/pypy/module/__builtin__/compiling.py
+++ b/pypy/module/__builtin__/compiling.py
@@ -59,9 +59,8 @@
"compile() expected string without null bytes"))
if flags & consts.PyCF_ONLY_AST:
- mode = ec.compiler.compile_to_ast(str_, filename, mode, flags)
- w_node = node.to_object(space)
- return w_node
+ node = ec.compiler.compile_to_ast(source, filename, mode, flags)
+ return node.to_object(space)
else:
code = ec.compiler.compile(source, filename, mode, flags)
return space.wrap(code)
From noreply at buildbot.pypy.org Mon Aug 18 16:05:38 2014
From: noreply at buildbot.pypy.org (amauryfa)
Date: Mon, 18 Aug 2014 16:05:38 +0200 (CEST)
Subject: [pypy-commit] pypy default: Merge branch split-ast-classes:
Message-ID: <20140818140538.4C25F1D22EF@cobra.cs.uni-duesseldorf.de>
Author: Amaury Forgeot d'Arc
Branch:
Changeset: r72881:e3c463bd6f19
Date: 2014-08-18 15:59 +0200
http://bitbucket.org/pypy/pypy/changeset/e3c463bd6f19/
Log: Merge branch split-ast-classes: classes in the ast module are now
distinct from the node types used by the compiler.
This removes all the hacks to keep attributes in sync, and will
reduce memory needed to compile a module.
From noreply at buildbot.pypy.org Mon Aug 18 16:05:39 2014
From: noreply at buildbot.pypy.org (amauryfa)
Date: Mon, 18 Aug 2014 16:05:39 +0200 (CEST)
Subject: [pypy-commit] pypy default: Add doc for the new merged branch
Message-ID: <20140818140539.6CE861D22EF@cobra.cs.uni-duesseldorf.de>
Author: Amaury Forgeot d'Arc
Branch:
Changeset: r72882:4ebcd372fbbc
Date: 2014-08-18 16:02 +0200
http://bitbucket.org/pypy/pypy/changeset/4ebcd372fbbc/
Log: Add doc for the new merged branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -54,3 +54,6 @@
.. branch: pytest-25
Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20,
respectively.
+
+.. branch: split-ast-classes
+Classes in the ast module are now distinct from structures used by the compiler.
From noreply at buildbot.pypy.org Mon Aug 18 16:10:10 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:10:10 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Attempt to fix
CALL_RELEASE_GIL with stm
Message-ID: <20140818141010.9B4631C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72883:e9aa2fdab146
Date: 2014-08-18 15:31 +0200
http://bitbucket.org/pypy/pypy/changeset/e9aa2fdab146/
Log: Attempt to fix CALL_RELEASE_GIL with stm
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -361,6 +361,8 @@
lltype.Void))
def _build_release_gil(self, gcrootmap):
+ if self.gc_ll_descr.stm:
+ return
if gcrootmap is None or gcrootmap.is_shadow_stack:
reacqgil_func = llhelper(self._REACQGIL0_FUNC,
self._reacquire_gil_shadowstack)
diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py
--- a/rpython/jit/backend/llsupport/callbuilder.py
+++ b/rpython/jit/backend/llsupport/callbuilder.py
@@ -1,5 +1,5 @@
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
-from rpython.rlib import rgil
+from rpython.rlib import rgc, rgil
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -45,7 +45,10 @@
def emit_call_release_gil(self):
"""Emit a CALL_RELEASE_GIL, including calls to releasegil_addr
and reacqgil_addr."""
- fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil())
+ if rgc.stm_is_enabled():
+ fastgil = 0
+ else:
+ fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil())
self.select_call_release_gil_mode()
self.prepare_arguments()
self.push_gcmap_for_call_release_gil()
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py
--- a/rpython/jit/backend/x86/callbuilder.py
+++ b/rpython/jit/backend/x86/callbuilder.py
@@ -95,6 +95,10 @@
def call_releasegil_addr_and_move_real_arguments(self, fastgil):
from rpython.jit.backend.x86.assembler import heap
#
+ if self.asm.cpu.gc_ll_descr.stm:
+ self.call_stm_before_ex_call()
+ return
+ #
if not self.asm._is_asmgcc():
# shadowstack: change 'rpy_fastgil' to 0 (it should be
# non-zero right now).
@@ -132,6 +136,10 @@
def move_real_result_and_call_reacqgil_addr(self, fastgil):
from rpython.jit.backend.x86 import rx86
#
+ if self.asm.cpu.gc_ll_descr.stm:
+ self.call_stm_after_ex_call()
+ return
+ #
# check if we need to call the reacqgil() function or not
# (to acquiring the GIL, remove the asmgcc head from
# the chained list, etc.)
@@ -482,6 +490,41 @@
assert self.restype == INT
self.mc.MOV_rs(eax.value, 0)
+ def call_stm_before_ex_call(self):
+ # XXX slowish: before any CALL_RELEASE_GIL, invoke the
+ # pypy_stm_commit_if_not_atomic() function. Messy because
+ # we need to save the register arguments first.
+ #
+ n = min(self.next_arg_gpr, len(self.ARGUMENTS_GPR))
+ for i in range(n):
+ self.mc.PUSH_r(self.ARGUMENTS_GPR[i].value) # PUSH gpr arg
+ m = min(self.next_arg_xmm, len(self.ARGUMENTS_XMM))
+ extra = m + ((n + m) & 1)
+ # in total the stack is moved down by (n + extra) words,
+ # which needs to be an even value for alignment:
+ assert ((n + extra) & 1) == 0
+ if extra > 0:
+ self.mc.SUB_ri(esp.value, extra * WORD) # SUB rsp, extra
+ for i in range(m):
+ self.mc.MOVSD_sx(i * WORD, self.ARGUMENTS_XMM[i].value)
+ # MOVSD [rsp+..], xmm
+ #
+ self.mc.CALL(imm(rstm.adr_pypy_stm_commit_if_not_atomic))
+ #
+ if extra > 0:
+ for i in range(m):
+ self.mc.MOVSD_xs(self.ARGUMENTS_XMM[i].value, i * WORD)
+ self.mc.ADD_ri(esp.value, extra * WORD)
+ for i in range(n-1, -1, -1):
+ self.mc.POP_r(self.ARGUMENTS_GPR[i].value)
+
+ def call_stm_after_ex_call(self):
+ # after any CALL_RELEASE_GIL, invoke the
+ # pypy_stm_start_if_not_atomic() function
+ self.save_result_value_reacq()
+ self.mc.CALL(imm(rstm.adr_pypy_stm_start_if_not_atomic))
+ self.restore_result_value_reacq()
+
if IS_X86_32:
CallBuilder = CallBuilder32
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -35,6 +35,10 @@
adr_pypy__rewind_jmp_copy_stack_slice = (
CFlexSymbolic('((long)&pypy__rewind_jmp_copy_stack_slice)'))
+adr_pypy_stm_commit_if_not_atomic = (
+ CFlexSymbolic('((long)&pypy_stm_commit_if_not_atomic)'))
+adr_pypy_stm_start_if_not_atomic = (
+ CFlexSymbolic('((long)&pypy_stm_start_if_not_atomic)'))
def rewind_jmp_frame():
From noreply at buildbot.pypy.org Mon Aug 18 16:10:11 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:10:11 +0200 (CEST)
Subject: [pypy-commit] pypy default: Improve the test
Message-ID: <20140818141011.D73021C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72884:7a0f310a4651
Date: 2014-08-18 16:03 +0200
http://bitbucket.org/pypy/pypy/changeset/7a0f310a4651/
Log: Improve the test
diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py
--- a/rpython/jit/backend/llsupport/test/test_gc.py
+++ b/rpython/jit/backend/llsupport/test/test_gc.py
@@ -261,6 +261,7 @@
if isinstance(TP, lltype.Ptr) and TP.TO._gckind == 'gc':
assert all_addrs[counter] == frame_adr + jitframe.getofs(name)
counter += 1
+ assert counter == 5
# gcpattern
assert all_addrs[5] == indexof(0)
assert all_addrs[6] == indexof(1)
@@ -269,13 +270,18 @@
assert all_addrs[9] == indexof(7)
if sys.maxint == 2**31 - 1:
assert all_addrs[10] == indexof(31)
- assert all_addrs[11] == indexof(33 + 32)
+ assert all_addrs[11] == indexof(65)
+ assert all_addrs[12] == indexof(68)
+ assert all_addrs[13] == indexof(69)
+ assert all_addrs[14] == indexof(71)
else:
assert all_addrs[10] == indexof(63)
- assert all_addrs[11] == indexof(65 + 64)
+ assert all_addrs[11] == indexof(129)
+ assert all_addrs[12] == indexof(132)
+ assert all_addrs[13] == indexof(133)
+ assert all_addrs[14] == indexof(135)
- assert len(all_addrs) == 5 + 6 + 4
- # 5 static fields, 4 addresses from gcmap, 2 from gcpattern
+ assert len(all_addrs) == 15
lltype.free(frame_info, flavor='raw')
lltype.free(frame.jf_gcmap, flavor='raw')
From noreply at buildbot.pypy.org Mon Aug 18 16:10:13 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:10:13 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: hg merge default
Message-ID: <20140818141013.1F6B71C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72885:ed6d5bd73896
Date: 2014-08-18 16:03 +0200
http://bitbucket.org/pypy/pypy/changeset/ed6d5bd73896/
Log: hg merge default
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4301,6 +4301,38 @@
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeString)
+ def test_isinstance_str_1(self):
+ def g():
+ pass
+ def f(n):
+ if n > 5:
+ s = "foo"
+ else:
+ s = None
+ g()
+ return isinstance(s, str)
+ a = self.RPythonAnnotator()
+ s = a.build_types(f, [int])
+ assert isinstance(s, annmodel.SomeBool)
+ assert not s.is_constant()
+
+ def test_isinstance_str_2(self):
+ def g():
+ pass
+ def f(n):
+ if n > 5:
+ s = "foo"
+ else:
+ s = None
+ g()
+ if isinstance(s, str):
+ return s
+ return ""
+ a = self.RPythonAnnotator()
+ s = a.build_types(f, [int])
+ assert isinstance(s, annmodel.SomeString)
+ assert not s.can_be_none()
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py
--- a/rpython/jit/backend/llsupport/test/test_gc.py
+++ b/rpython/jit/backend/llsupport/test/test_gc.py
@@ -263,6 +263,7 @@
if isinstance(TP, lltype.Ptr) and TP.TO._gckind == 'gc':
assert all_addrs[counter] == frame_adr + jitframe.getofs(name)
counter += 1
+ assert counter == 5
# gcpattern
assert all_addrs[5] == indexof(0)
assert all_addrs[6] == indexof(1)
@@ -271,13 +272,18 @@
assert all_addrs[9] == indexof(7)
if sys.maxint == 2**31 - 1:
assert all_addrs[10] == indexof(31)
- assert all_addrs[11] == indexof(33 + 32)
+ assert all_addrs[11] == indexof(65)
+ assert all_addrs[12] == indexof(68)
+ assert all_addrs[13] == indexof(69)
+ assert all_addrs[14] == indexof(71)
else:
assert all_addrs[10] == indexof(63)
- assert all_addrs[11] == indexof(65 + 64)
+ assert all_addrs[11] == indexof(129)
+ assert all_addrs[12] == indexof(132)
+ assert all_addrs[13] == indexof(133)
+ assert all_addrs[14] == indexof(135)
- assert len(all_addrs) == 5 + 6 + 4
- # 5 static fields, 4 addresses from gcmap, 2 from gcpattern
+ assert len(all_addrs) == 15
lltype.free(frame_info, flavor='raw')
lltype.free(frame.jf_gcmap, flavor='raw')
diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py
--- a/rpython/rtyper/rbuiltin.py
+++ b/rpython/rtyper/rbuiltin.py
@@ -693,13 +693,14 @@
if hop.s_result.is_constant():
return hop.inputconst(lltype.Bool, hop.s_result.const)
- if hop.args_s[1].is_constant() and hop.args_s[1].const == list:
- if hop.args_s[0].knowntype != list:
- raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None")
- rlist = hop.args_r[0]
- vlist = hop.inputarg(rlist, arg=0)
- cnone = hop.inputconst(rlist, None)
- return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool)
+ if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list):
+ if hop.args_s[0].knowntype not in (str, list):
+ raise TyperError("isinstance(x, str/list) expects x to be known"
+ " statically to be a str/list or None")
+ rstrlist = hop.args_r[0]
+ vstrlist = hop.inputarg(rstrlist, arg=0)
+ cnone = hop.inputconst(rstrlist, None)
+ return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool)
assert isinstance(hop.args_r[0], rclass.InstanceRepr)
return hop.args_r[0].rtype_isinstance(hop)
diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py
--- a/rpython/rtyper/test/test_rbuiltin.py
+++ b/rpython/rtyper/test/test_rbuiltin.py
@@ -364,17 +364,35 @@
assert res == isinstance([A(), B(), C()][x-1], [A, B, C][y-1]) * 3
def test_isinstance_list(self):
+ def g():
+ pass
def f(i):
if i == 0:
l = []
else:
l = None
+ g()
return isinstance(l, list)
res = self.interpret(f, [0])
assert res is True
res = self.interpret(f, [1])
assert res is False
+ def test_isinstance_str(self):
+ def g():
+ pass
+ def f(i):
+ if i == 0:
+ l = "foobar"
+ else:
+ l = None
+ g()
+ return isinstance(l, str)
+ res = self.interpret(f, [0])
+ assert res is True
+ res = self.interpret(f, [1])
+ assert res is False
+
def test_instantiate(self):
class A:
pass
From noreply at buildbot.pypy.org Mon Aug 18 16:10:14 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:10:14 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix
Message-ID: <20140818141014.4EF7D1C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72886:f7f9ae07a05b
Date: 2014-08-18 16:07 +0200
http://bitbucket.org/pypy/pypy/changeset/f7f9ae07a05b/
Log: Fix
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -114,7 +114,8 @@
self._build_cond_call_slowpath(True, True)]
self._build_stack_check_slowpath()
- self._build_release_gil(gc_ll_descr.gcrootmap)
+ if not gc_ll_descr.stm:
+ self._build_release_gil(gc_ll_descr.gcrootmap)
if not self._debug:
# if self._debug is already set it means that someone called
# set_debug by hand before initializing the assembler. Leave it
@@ -361,8 +362,6 @@
lltype.Void))
def _build_release_gil(self, gcrootmap):
- if self.gc_ll_descr.stm:
- return
if gcrootmap is None or gcrootmap.is_shadow_stack:
reacqgil_func = llhelper(self._REACQGIL0_FUNC,
self._reacquire_gil_shadowstack)
From noreply at buildbot.pypy.org Mon Aug 18 16:10:15 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:10:15 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix test
Message-ID: <20140818141015.7DDFD1C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72887:0c92aa1a24e2
Date: 2014-08-18 16:08 +0200
http://bitbucket.org/pypy/pypy/changeset/0c92aa1a24e2/
Log: Fix test
diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py
--- a/rpython/jit/backend/llsupport/test/test_gc.py
+++ b/rpython/jit/backend/llsupport/test/test_gc.py
@@ -2,7 +2,7 @@
from rpython.rtyper.lltypesystem import lltype, llmemory, rstr
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.annlowlevel import llhelper
-from rpython.jit.backend.llsupport import jitframe, gc, descr
+from rpython.jit.backend.llsupport import jitframe, gc, descr, gcmap
from rpython.jit.backend.llsupport import symbolic
from rpython.jit.metainterp.gc import get_description
from rpython.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr
@@ -242,7 +242,8 @@
frame_info = lltype.malloc(jitframe.JITFRAMEINFO, zero=True, flavor='raw')
frame = lltype.malloc(jitframe.JITFRAME, 200, zero=True)
frame.jf_frame_info = frame_info
- frame.jf_gcmap = lltype.malloc(jitframe.GCMAP, 4, flavor='raw')
+ frame.jf_gcmap = lltype.malloc(jitframe.GCMAP, 4 + gcmap.GCMAP_STM_LOCATION,
+ flavor='raw')
if sys.maxint == 2**31 - 1:
max = r_uint(2 ** 31)
else:
From noreply at buildbot.pypy.org Mon Aug 18 16:10:16 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:10:16 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: merge heads
Message-ID: <20140818141016.A1EDD1C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72888:11d083257426
Date: 2014-08-18 16:08 +0200
http://bitbucket.org/pypy/pypy/changeset/11d083257426/
Log: merge heads
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -114,7 +114,8 @@
self._build_cond_call_slowpath(True, True)]
self._build_stack_check_slowpath()
- self._build_release_gil(gc_ll_descr.gcrootmap)
+ if not gc_ll_descr.stm:
+ self._build_release_gil(gc_ll_descr.gcrootmap)
if not self._debug:
# if self._debug is already set it means that someone called
# set_debug by hand before initializing the assembler. Leave it
@@ -361,8 +362,6 @@
lltype.Void))
def _build_release_gil(self, gcrootmap):
- if self.gc_ll_descr.stm:
- return
if gcrootmap is None or gcrootmap.is_shadow_stack:
reacqgil_func = llhelper(self._REACQGIL0_FUNC,
self._reacquire_gil_shadowstack)
From noreply at buildbot.pypy.org Mon Aug 18 16:10:18 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:10:18 +0200 (CEST)
Subject: [pypy-commit] pypy default: merge heads
Message-ID: <20140818141018.0C7141C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72889:16d07ec7276a
Date: 2014-08-18 16:09 +0200
http://bitbucket.org/pypy/pypy/changeset/16d07ec7276a/
Log: merge heads
diff too long, truncating to 2000 out of 10269 lines
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -54,3 +54,6 @@
.. branch: pytest-25
Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20,
respectively.
+
+.. branch: split-ast-classes
+Classes in the ast module are now distinct from structures used by the compiler.
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -1,5 +1,4 @@
# Generated by tools/asdl_py.py
-from rpython.rlib.unroll import unrolling_iterable
from rpython.tool.pairtype import extendabletype
from rpython.tool.sourcetools import func_with_new_name
@@ -21,11 +20,15 @@
'AST string must be of type str or unicode'))
return w_obj
-
-class AST(W_Root):
-
- w_dict = None
-
+def get_field(space, w_node, name, optional):
+ w_obj = w_node.getdictvalue(space, name)
+ if w_obj is None and not optional:
+ raise oefmt(space.w_TypeError,
+ "required field \"%s\" missing from %T", name, w_node)
+ return w_obj
+
+
+class AST(object):
__metaclass__ = extendabletype
def walkabout(self, visitor):
@@ -34,8 +37,23 @@
def mutate_over(self, visitor):
raise AssertionError("mutate_over() implementation not provided")
- def sync_app_attrs(self, space):
- raise NotImplementedError
+
+class NodeVisitorNotImplemented(Exception):
+ pass
+
+
+class _FieldsWrapper(W_Root):
+ "Hack around the fact we can't store tuples on a TypeDef."
+
+ def __init__(self, fields):
+ self.fields = fields
+
+ def __spacebind__(self, space):
+ return space.newtuple([space.wrap(field) for field in self.fields])
+
+
+class W_AST(W_Root):
+ w_dict = None
def getdict(self, space):
if self.w_dict is None:
@@ -47,7 +65,7 @@
if w_dict is None:
w_dict = space.newdict()
w_type = space.type(self)
- w_fields = w_type.getdictvalue(space, "_fields")
+ w_fields = space.getattr(w_type, space.wrap("_fields"))
for w_name in space.fixedview(w_fields):
try:
space.setitem(w_dict, w_name,
@@ -71,79 +89,94 @@
space.setattr(self, w_name,
space.getitem(w_state, w_name))
- def missing_field(self, space, required, host):
- "Find which required field is missing."
- state = self.initialization_state
- for i in range(len(required)):
- if (state >> i) & 1:
- continue # field is present
- missing = required[i]
- if missing is None:
- continue # field is optional
- w_obj = self.getdictvalue(space, missing)
- if w_obj is None:
- raise oefmt(space.w_TypeError,
- "required field \"%s\" missing from %s",
- missing, host)
- else:
- raise oefmt(space.w_TypeError,
- "incorrect type for field \"%s\" in %s",
- missing, host)
- raise AssertionError("should not reach here")
-
-
-class NodeVisitorNotImplemented(Exception):
- pass
-
-
-class _FieldsWrapper(W_Root):
- "Hack around the fact we can't store tuples on a TypeDef."
-
- def __init__(self, fields):
- self.fields = fields
-
- def __spacebind__(self, space):
- return space.newtuple([space.wrap(field) for field in self.fields])
-
-
-def get_AST_new(node_class):
- def generic_AST_new(space, w_type, __args__):
- node = space.allocate_instance(node_class, w_type)
- node.initialization_state = 0
- return space.wrap(node)
- return func_with_new_name(generic_AST_new, "new_%s" % node_class.__name__)
-
-def AST_init(space, w_self, __args__):
+def W_AST_new(space, w_type, __args__):
+ node = space.allocate_instance(W_AST, w_type)
+ return space.wrap(node)
+
+def W_AST_init(space, w_self, __args__):
args_w, kwargs_w = __args__.unpack()
- if args_w and len(args_w) != 0:
- w_err = space.wrap("_ast.AST constructor takes 0 positional arguments")
- raise OperationError(space.w_TypeError, w_err)
+ fields_w = space.fixedview(space.getattr(space.type(w_self),
+ space.wrap("_fields")))
+ num_fields = len(fields_w) if fields_w else 0
+ if args_w and len(args_w) != num_fields:
+ if num_fields == 0:
+ raise oefmt(space.w_TypeError,
+ "%T constructor takes 0 positional arguments", w_self)
+ elif num_fields == 1:
+ raise oefmt(space.w_TypeError,
+ "%T constructor takes either 0 or %d positional argument", w_self, num_fields)
+ else:
+ raise oefmt(space.w_TypeError,
+ "%T constructor takes either 0 or %d positional arguments", w_self, num_fields)
+ if args_w:
+ for i, w_field in enumerate(fields_w):
+ space.setattr(w_self, w_field, args_w[i])
for field, w_value in kwargs_w.iteritems():
space.setattr(w_self, space.wrap(field), w_value)
-AST.typedef = typedef.TypeDef("_ast.AST",
+
+W_AST.typedef = typedef.TypeDef("_ast.AST",
_fields=_FieldsWrapper([]),
_attributes=_FieldsWrapper([]),
- __reduce__=interp2app(AST.reduce_w),
- __setstate__=interp2app(AST.setstate_w),
+ __reduce__=interp2app(W_AST.reduce_w),
+ __setstate__=interp2app(W_AST.setstate_w),
__dict__ = typedef.GetSetProperty(typedef.descr_get_dict,
- typedef.descr_set_dict, cls=AST),
- __new__=interp2app(get_AST_new(AST)),
- __init__=interp2app(AST_init),
+ typedef.descr_set_dict, cls=W_AST),
+ __new__=interp2app(W_AST_new),
+ __init__=interp2app(W_AST_init),
)
-
-
+class State:
+ AST_TYPES = []
+
+ @classmethod
+ def ast_type(cls, name, base, fields, attributes=None):
+ cls.AST_TYPES.append((name, base, fields, attributes))
+
+ def __init__(self, space):
+ self.w_AST = space.gettypeobject(W_AST.typedef)
+ for (name, base, fields, attributes) in self.AST_TYPES:
+ self.make_new_type(space, name, base, fields, attributes)
+
+ def make_new_type(self, space, name, base, fields, attributes):
+ w_base = getattr(self, 'w_%s' % base)
+ w_dict = space.newdict()
+ space.setitem_str(w_dict, '__module__', space.wrap('_ast'))
+ if fields is not None:
+ space.setitem_str(w_dict, "_fields",
+ space.newtuple([space.wrap(f) for f in fields]))
+ if attributes is not None:
+ space.setitem_str(w_dict, "_attributes",
+ space.newtuple([space.wrap(a) for a in attributes]))
+ w_type = space.call_function(
+ space.w_type,
+ space.wrap(name), space.newtuple([w_base]), w_dict)
+ setattr(self, 'w_%s' % name, w_type)
+
+def get(space):
+ return space.fromcache(State)
class mod(AST):
- pass
+ @staticmethod
+ def from_object(space, w_node):
+ if space.is_w(w_node, space.w_None):
+ return None
+ if space.isinstance_w(w_node, get(space).w_Module):
+ return Module.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Interactive):
+ return Interactive.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Expression):
+ return Expression.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Suite):
+ return Suite.from_object(space, w_node)
+ raise oefmt(space.w_TypeError,
+ "Expected mod node, got %T", w_node)
+State.ast_type('mod', 'AST', None, [])
class Module(mod):
def __init__(self, body):
self.body = body
- self.w_body = None
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Module(self)
@@ -153,29 +186,30 @@
visitor._mutate_sequence(self.body)
return visitor.visit_Module(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Module')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Module)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ return Module(_body)
+
+State.ast_type('Module', 'mod', ['body'])
class Interactive(mod):
def __init__(self, body):
self.body = body
- self.w_body = None
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Interactive(self)
@@ -185,28 +219,30 @@
visitor._mutate_sequence(self.body)
return visitor.visit_Interactive(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Interactive')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Interactive)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ return Interactive(_body)
+
+State.ast_type('Interactive', 'mod', ['body'])
class Expression(mod):
def __init__(self, body):
self.body = body
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Expression(self)
@@ -215,20 +251,25 @@
self.body = self.body.mutate_over(visitor)
return visitor.visit_Expression(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Expression')
- else:
- pass
- self.body.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Expression)
+ w_body = self.body.to_object(space) # expr
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ _body = expr.from_object(space, w_body)
+ return Expression(_body)
+
+State.ast_type('Expression', 'mod', ['body'])
class Suite(mod):
def __init__(self, body):
self.body = body
- self.w_body = None
- self.initialization_state = 1
def walkabout(self, visitor):
visitor.visit_Suite(self)
@@ -238,21 +279,24 @@
visitor._mutate_sequence(self.body)
return visitor.visit_Suite(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 1:
- self.missing_field(space, ['body'], 'Suite')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Suite)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ return Suite(_body)
+
+State.ast_type('Suite', 'mod', ['body'])
class stmt(AST):
@@ -261,17 +305,68 @@
self.lineno = lineno
self.col_offset = col_offset
+ @staticmethod
+ def from_object(space, w_node):
+ if space.is_w(w_node, space.w_None):
+ return None
+ if space.isinstance_w(w_node, get(space).w_FunctionDef):
+ return FunctionDef.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_ClassDef):
+ return ClassDef.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Return):
+ return Return.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Delete):
+ return Delete.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Assign):
+ return Assign.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_AugAssign):
+ return AugAssign.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Print):
+ return Print.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_For):
+ return For.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_While):
+ return While.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_If):
+ return If.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_With):
+ return With.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Raise):
+ return Raise.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_TryExcept):
+ return TryExcept.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_TryFinally):
+ return TryFinally.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Assert):
+ return Assert.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Import):
+ return Import.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_ImportFrom):
+ return ImportFrom.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Exec):
+ return Exec.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Global):
+ return Global.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Expr):
+ return Expr.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Pass):
+ return Pass.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Break):
+ return Break.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Continue):
+ return Continue.from_object(space, w_node)
+ raise oefmt(space.w_TypeError,
+ "Expected stmt node, got %T", w_node)
+State.ast_type('stmt', 'AST', None, ['lineno', 'col_offset'])
+
class FunctionDef(stmt):
def __init__(self, name, args, body, decorator_list, lineno, col_offset):
self.name = name
self.args = args
self.body = body
- self.w_body = None
self.decorator_list = decorator_list
- self.w_decorator_list = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 63
def walkabout(self, visitor):
visitor.visit_FunctionDef(self)
@@ -284,32 +379,49 @@
visitor._mutate_sequence(self.decorator_list)
return visitor.visit_FunctionDef(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 63:
- self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_FunctionDef)
+ w_name = space.wrap(self.name) # identifier
+ space.setattr(w_node, space.wrap('name'), w_name)
+ w_args = self.args.to_object(space) # arguments
+ space.setattr(w_node, space.wrap('args'), w_args)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.args.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_decorator_list
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.decorator_list = None
- if self.decorator_list is not None:
- for node in self.decorator_list:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.decorator_list is None:
+ decorator_list_w = []
+ else:
+ decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr
+ w_decorator_list = space.newlist(decorator_list_w)
+ space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_name = get_field(space, w_node, 'name', False)
+ w_args = get_field(space, w_node, 'args', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_decorator_list = get_field(space, w_node, 'decorator_list', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _name = space.realstr_w(w_name)
+ _args = arguments.from_object(space, w_args)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ decorator_list_w = space.unpackiterable(w_decorator_list)
+ _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return FunctionDef(_name, _args, _body, _decorator_list, _lineno, _col_offset)
+
+State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list'])
class ClassDef(stmt):
@@ -317,13 +429,9 @@
def __init__(self, name, bases, body, decorator_list, lineno, col_offset):
self.name = name
self.bases = bases
- self.w_bases = None
self.body = body
- self.w_body = None
self.decorator_list = decorator_list
- self.w_decorator_list = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 63
def walkabout(self, visitor):
visitor.visit_ClassDef(self)
@@ -337,41 +445,54 @@
visitor._mutate_sequence(self.decorator_list)
return visitor.visit_ClassDef(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 63:
- self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_ClassDef)
+ w_name = space.wrap(self.name) # identifier
+ space.setattr(w_node, space.wrap('name'), w_name)
+ if self.bases is None:
+ bases_w = []
else:
- pass
- w_list = self.w_bases
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.bases = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.bases = None
- if self.bases is not None:
- for node in self.bases:
- node.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_decorator_list
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.decorator_list = None
- if self.decorator_list is not None:
- for node in self.decorator_list:
- node.sync_app_attrs(space)
+ bases_w = [node.to_object(space) for node in self.bases] # expr
+ w_bases = space.newlist(bases_w)
+ space.setattr(w_node, space.wrap('bases'), w_bases)
+ if self.body is None:
+ body_w = []
+ else:
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.decorator_list is None:
+ decorator_list_w = []
+ else:
+ decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr
+ w_decorator_list = space.newlist(decorator_list_w)
+ space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_name = get_field(space, w_node, 'name', False)
+ w_bases = get_field(space, w_node, 'bases', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_decorator_list = get_field(space, w_node, 'decorator_list', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _name = space.realstr_w(w_name)
+ bases_w = space.unpackiterable(w_bases)
+ _bases = [expr.from_object(space, w_item) for w_item in bases_w]
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ decorator_list_w = space.unpackiterable(w_decorator_list)
+ _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return ClassDef(_name, _bases, _body, _decorator_list, _lineno, _col_offset)
+
+State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'body', 'decorator_list'])
class Return(stmt):
@@ -379,7 +500,6 @@
def __init__(self, value, lineno, col_offset):
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Return(self)
@@ -389,23 +509,34 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_Return(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~4) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset', None], 'Return')
- else:
- if not self.initialization_state & 4:
- self.value = None
- if self.value:
- self.value.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Return)
+ w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_value = get_field(space, w_node, 'value', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Return(_value, _lineno, _col_offset)
+
+State.ast_type('Return', 'stmt', ['value'])
class Delete(stmt):
def __init__(self, targets, lineno, col_offset):
self.targets = targets
- self.w_targets = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Delete(self)
@@ -415,31 +546,40 @@
visitor._mutate_sequence(self.targets)
return visitor.visit_Delete(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Delete)
+ if self.targets is None:
+ targets_w = []
else:
- pass
- w_list = self.w_targets
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.targets = None
- if self.targets is not None:
- for node in self.targets:
- node.sync_app_attrs(space)
+ targets_w = [node.to_object(space) for node in self.targets] # expr
+ w_targets = space.newlist(targets_w)
+ space.setattr(w_node, space.wrap('targets'), w_targets)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_targets = get_field(space, w_node, 'targets', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ targets_w = space.unpackiterable(w_targets)
+ _targets = [expr.from_object(space, w_item) for w_item in targets_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Delete(_targets, _lineno, _col_offset)
+
+State.ast_type('Delete', 'stmt', ['targets'])
class Assign(stmt):
def __init__(self, targets, value, lineno, col_offset):
self.targets = targets
- self.w_targets = None
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_Assign(self)
@@ -450,22 +590,36 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_Assign(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 15:
- self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Assign)
+ if self.targets is None:
+ targets_w = []
else:
- pass
- w_list = self.w_targets
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.targets = None
- if self.targets is not None:
- for node in self.targets:
- node.sync_app_attrs(space)
- self.value.sync_app_attrs(space)
+ targets_w = [node.to_object(space) for node in self.targets] # expr
+ w_targets = space.newlist(targets_w)
+ space.setattr(w_node, space.wrap('targets'), w_targets)
+ w_value = self.value.to_object(space) # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_targets = get_field(space, w_node, 'targets', False)
+ w_value = get_field(space, w_node, 'value', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ targets_w = space.unpackiterable(w_targets)
+ _targets = [expr.from_object(space, w_item) for w_item in targets_w]
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Assign(_targets, _value, _lineno, _col_offset)
+
+State.ast_type('Assign', 'stmt', ['targets', 'value'])
class AugAssign(stmt):
@@ -475,7 +629,6 @@
self.op = op
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_AugAssign(self)
@@ -485,13 +638,35 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_AugAssign(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign')
- else:
- pass
- self.target.sync_app_attrs(space)
- self.value.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_AugAssign)
+ w_target = self.target.to_object(space) # expr
+ space.setattr(w_node, space.wrap('target'), w_target)
+ w_op = operator_to_class[self.op - 1]().to_object(space) # operator
+ space.setattr(w_node, space.wrap('op'), w_op)
+ w_value = self.value.to_object(space) # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_target = get_field(space, w_node, 'target', False)
+ w_op = get_field(space, w_node, 'op', False)
+ w_value = get_field(space, w_node, 'value', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _target = expr.from_object(space, w_target)
+ _op = operator.from_object(space, w_op)
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return AugAssign(_target, _op, _value, _lineno, _col_offset)
+
+State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value'])
class Print(stmt):
@@ -499,10 +674,8 @@
def __init__(self, dest, values, nl, lineno, col_offset):
self.dest = dest
self.values = values
- self.w_values = None
self.nl = nl
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_Print(self)
@@ -514,24 +687,40 @@
visitor._mutate_sequence(self.values)
return visitor.visit_Print(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~4) ^ 27:
- self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Print)
+ w_dest = self.dest.to_object(space) if self.dest is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('dest'), w_dest)
+ if self.values is None:
+ values_w = []
else:
- if not self.initialization_state & 4:
- self.dest = None
- if self.dest:
- self.dest.sync_app_attrs(space)
- w_list = self.w_values
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.values = [space.interp_w(expr, w_obj) for w_obj in list_w]
- else:
- self.values = None
- if self.values is not None:
- for node in self.values:
- node.sync_app_attrs(space)
+ values_w = [node.to_object(space) for node in self.values] # expr
+ w_values = space.newlist(values_w)
+ space.setattr(w_node, space.wrap('values'), w_values)
+ w_nl = space.wrap(self.nl) # bool
+ space.setattr(w_node, space.wrap('nl'), w_nl)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_dest = get_field(space, w_node, 'dest', True)
+ w_values = get_field(space, w_node, 'values', False)
+ w_nl = get_field(space, w_node, 'nl', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _dest = expr.from_object(space, w_dest)
+ values_w = space.unpackiterable(w_values)
+ _values = [expr.from_object(space, w_item) for w_item in values_w]
+ _nl = space.bool_w(w_nl)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Print(_dest, _values, _nl, _lineno, _col_offset)
+
+State.ast_type('Print', 'stmt', ['dest', 'values', 'nl'])
class For(stmt):
@@ -540,11 +729,8 @@
self.target = target
self.iter = iter
self.body = body
- self.w_body = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 63
def walkabout(self, visitor):
visitor.visit_For(self)
@@ -558,33 +744,49 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_For(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 63:
- self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_For)
+ w_target = self.target.to_object(space) # expr
+ space.setattr(w_node, space.wrap('target'), w_target)
+ w_iter = self.iter.to_object(space) # expr
+ space.setattr(w_node, space.wrap('iter'), w_iter)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.target.sync_app_attrs(space)
- self.iter.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_target = get_field(space, w_node, 'target', False)
+ w_iter = get_field(space, w_node, 'iter', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _target = expr.from_object(space, w_target)
+ _iter = expr.from_object(space, w_iter)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return For(_target, _iter, _body, _orelse, _lineno, _col_offset)
+
+State.ast_type('For', 'stmt', ['target', 'iter', 'body', 'orelse'])
class While(stmt):
@@ -592,11 +794,8 @@
def __init__(self, test, body, orelse, lineno, col_offset):
self.test = test
self.body = body
- self.w_body = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_While(self)
@@ -609,32 +808,45 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_While(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_While)
+ w_test = self.test.to_object(space) # expr
+ space.setattr(w_node, space.wrap('test'), w_test)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.test.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_test = get_field(space, w_node, 'test', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _test = expr.from_object(space, w_test)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return While(_test, _body, _orelse, _lineno, _col_offset)
+
+State.ast_type('While', 'stmt', ['test', 'body', 'orelse'])
class If(stmt):
@@ -642,11 +854,8 @@
def __init__(self, test, body, orelse, lineno, col_offset):
self.test = test
self.body = body
- self.w_body = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_If(self)
@@ -659,32 +868,45 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_If(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_If)
+ w_test = self.test.to_object(space) # expr
+ space.setattr(w_node, space.wrap('test'), w_test)
+ if self.body is None:
+ body_w = []
else:
- pass
- self.test.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_test = get_field(space, w_node, 'test', False)
+ w_body = get_field(space, w_node, 'body', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _test = expr.from_object(space, w_test)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return If(_test, _body, _orelse, _lineno, _col_offset)
+
+State.ast_type('If', 'stmt', ['test', 'body', 'orelse'])
class With(stmt):
@@ -693,9 +915,7 @@
self.context_expr = context_expr
self.optional_vars = optional_vars
self.body = body
- self.w_body = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_With(self)
@@ -708,25 +928,40 @@
visitor._mutate_sequence(self.body)
return visitor.visit_With(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~8) ^ 23:
- self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_With)
+ w_context_expr = self.context_expr.to_object(space) # expr
+ space.setattr(w_node, space.wrap('context_expr'), w_context_expr)
+ w_optional_vars = self.optional_vars.to_object(space) if self.optional_vars is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('optional_vars'), w_optional_vars)
+ if self.body is None:
+ body_w = []
else:
- if not self.initialization_state & 8:
- self.optional_vars = None
- self.context_expr.sync_app_attrs(space)
- if self.optional_vars:
- self.optional_vars.sync_app_attrs(space)
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_context_expr = get_field(space, w_node, 'context_expr', False)
+ w_optional_vars = get_field(space, w_node, 'optional_vars', True)
+ w_body = get_field(space, w_node, 'body', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _context_expr = expr.from_object(space, w_context_expr)
+ _optional_vars = expr.from_object(space, w_optional_vars)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return With(_context_expr, _optional_vars, _body, _lineno, _col_offset)
+
+State.ast_type('With', 'stmt', ['context_expr', 'optional_vars', 'body'])
class Raise(stmt):
@@ -736,7 +971,6 @@
self.inst = inst
self.tback = tback
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_Raise(self)
@@ -750,35 +984,44 @@
self.tback = self.tback.mutate_over(visitor)
return visitor.visit_Raise(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~28) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise')
- else:
- if not self.initialization_state & 4:
- self.type = None
- if not self.initialization_state & 8:
- self.inst = None
- if not self.initialization_state & 16:
- self.tback = None
- if self.type:
- self.type.sync_app_attrs(space)
- if self.inst:
- self.inst.sync_app_attrs(space)
- if self.tback:
- self.tback.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Raise)
+ w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('type'), w_type)
+ w_inst = self.inst.to_object(space) if self.inst is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('inst'), w_inst)
+ w_tback = self.tback.to_object(space) if self.tback is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('tback'), w_tback)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_type = get_field(space, w_node, 'type', True)
+ w_inst = get_field(space, w_node, 'inst', True)
+ w_tback = get_field(space, w_node, 'tback', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _type = expr.from_object(space, w_type)
+ _inst = expr.from_object(space, w_inst)
+ _tback = expr.from_object(space, w_tback)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Raise(_type, _inst, _tback, _lineno, _col_offset)
+
+State.ast_type('Raise', 'stmt', ['type', 'inst', 'tback'])
class TryExcept(stmt):
def __init__(self, body, handlers, orelse, lineno, col_offset):
self.body = body
- self.w_body = None
self.handlers = handlers
- self.w_handlers = None
self.orelse = orelse
- self.w_orelse = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_TryExcept(self)
@@ -792,52 +1035,58 @@
visitor._mutate_sequence(self.orelse)
return visitor.visit_TryExcept(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 31:
- self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_TryExcept)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_handlers
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.handlers = [space.interp_w(excepthandler, w_obj) for w_obj in list_w]
- else:
- self.handlers = None
- if self.handlers is not None:
- for node in self.handlers:
- node.sync_app_attrs(space)
- w_list = self.w_orelse
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.orelse = None
- if self.orelse is not None:
- for node in self.orelse:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.handlers is None:
+ handlers_w = []
+ else:
+ handlers_w = [node.to_object(space) for node in self.handlers] # excepthandler
+ w_handlers = space.newlist(handlers_w)
+ space.setattr(w_node, space.wrap('handlers'), w_handlers)
+ if self.orelse is None:
+ orelse_w = []
+ else:
+ orelse_w = [node.to_object(space) for node in self.orelse] # stmt
+ w_orelse = space.newlist(orelse_w)
+ space.setattr(w_node, space.wrap('orelse'), w_orelse)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ w_handlers = get_field(space, w_node, 'handlers', False)
+ w_orelse = get_field(space, w_node, 'orelse', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ handlers_w = space.unpackiterable(w_handlers)
+ _handlers = [excepthandler.from_object(space, w_item) for w_item in handlers_w]
+ orelse_w = space.unpackiterable(w_orelse)
+ _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset)
+
+State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse'])
class TryFinally(stmt):
def __init__(self, body, finalbody, lineno, col_offset):
self.body = body
- self.w_body = None
self.finalbody = finalbody
- self.w_finalbody = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_TryFinally(self)
@@ -849,31 +1098,41 @@
visitor._mutate_sequence(self.finalbody)
return visitor.visit_TryFinally(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 15:
- self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_TryFinally)
+ if self.body is None:
+ body_w = []
else:
- pass
- w_list = self.w_body
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.body = None
- if self.body is not None:
- for node in self.body:
- node.sync_app_attrs(space)
- w_list = self.w_finalbody
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.finalbody = [space.interp_w(stmt, w_obj) for w_obj in list_w]
- else:
- self.finalbody = None
- if self.finalbody is not None:
- for node in self.finalbody:
- node.sync_app_attrs(space)
+ body_w = [node.to_object(space) for node in self.body] # stmt
+ w_body = space.newlist(body_w)
+ space.setattr(w_node, space.wrap('body'), w_body)
+ if self.finalbody is None:
+ finalbody_w = []
+ else:
+ finalbody_w = [node.to_object(space) for node in self.finalbody] # stmt
+ w_finalbody = space.newlist(finalbody_w)
+ space.setattr(w_node, space.wrap('finalbody'), w_finalbody)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ w_finalbody = get_field(space, w_node, 'finalbody', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ body_w = space.unpackiterable(w_body)
+ _body = [stmt.from_object(space, w_item) for w_item in body_w]
+ finalbody_w = space.unpackiterable(w_finalbody)
+ _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return TryFinally(_body, _finalbody, _lineno, _col_offset)
+
+State.ast_type('TryFinally', 'stmt', ['body', 'finalbody'])
class Assert(stmt):
@@ -882,7 +1141,6 @@
self.test = test
self.msg = msg
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_Assert(self)
@@ -893,24 +1151,38 @@
self.msg = self.msg.mutate_over(visitor)
return visitor.visit_Assert(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~8) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert')
- else:
- if not self.initialization_state & 8:
- self.msg = None
- self.test.sync_app_attrs(space)
- if self.msg:
- self.msg.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Assert)
+ w_test = self.test.to_object(space) # expr
+ space.setattr(w_node, space.wrap('test'), w_test)
+ w_msg = self.msg.to_object(space) if self.msg is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('msg'), w_msg)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_test = get_field(space, w_node, 'test', False)
+ w_msg = get_field(space, w_node, 'msg', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _test = expr.from_object(space, w_test)
+ _msg = expr.from_object(space, w_msg)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Assert(_test, _msg, _lineno, _col_offset)
+
+State.ast_type('Assert', 'stmt', ['test', 'msg'])
class Import(stmt):
def __init__(self, names, lineno, col_offset):
self.names = names
- self.w_names = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Import(self)
@@ -920,21 +1192,32 @@
visitor._mutate_sequence(self.names)
return visitor.visit_Import(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Import)
+ if self.names is None:
+ names_w = []
else:
- pass
- w_list = self.w_names
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.names = [space.interp_w(alias, w_obj) for w_obj in list_w]
- else:
- self.names = None
- if self.names is not None:
- for node in self.names:
- node.sync_app_attrs(space)
+ names_w = [node.to_object(space) for node in self.names] # alias
+ w_names = space.newlist(names_w)
+ space.setattr(w_node, space.wrap('names'), w_names)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_names = get_field(space, w_node, 'names', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ names_w = space.unpackiterable(w_names)
+ _names = [alias.from_object(space, w_item) for w_item in names_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Import(_names, _lineno, _col_offset)
+
+State.ast_type('Import', 'stmt', ['names'])
class ImportFrom(stmt):
@@ -942,10 +1225,8 @@
def __init__(self, module, names, level, lineno, col_offset):
self.module = module
self.names = names
- self.w_names = None
self.level = level
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_ImportFrom(self)
@@ -955,24 +1236,40 @@
visitor._mutate_sequence(self.names)
return visitor.visit_ImportFrom(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~20) ^ 11:
- self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_ImportFrom)
+ w_module = space.wrap(self.module) # identifier
+ space.setattr(w_node, space.wrap('module'), w_module)
+ if self.names is None:
+ names_w = []
else:
- if not self.initialization_state & 4:
- self.module = None
- if not self.initialization_state & 16:
- self.level = 0
- w_list = self.w_names
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.names = [space.interp_w(alias, w_obj) for w_obj in list_w]
- else:
- self.names = None
- if self.names is not None:
- for node in self.names:
- node.sync_app_attrs(space)
+ names_w = [node.to_object(space) for node in self.names] # alias
+ w_names = space.newlist(names_w)
+ space.setattr(w_node, space.wrap('names'), w_names)
+ w_level = space.wrap(self.level) # int
+ space.setattr(w_node, space.wrap('level'), w_level)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_module = get_field(space, w_node, 'module', True)
+ w_names = get_field(space, w_node, 'names', False)
+ w_level = get_field(space, w_node, 'level', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _module = space.str_or_None_w(w_module)
+ names_w = space.unpackiterable(w_names)
+ _names = [alias.from_object(space, w_item) for w_item in names_w]
+ _level = space.int_w(w_level)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return ImportFrom(_module, _names, _level, _lineno, _col_offset)
+
+State.ast_type('ImportFrom', 'stmt', ['module', 'names', 'level'])
class Exec(stmt):
@@ -982,7 +1279,6 @@
self.globals = globals
self.locals = locals
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 31
def walkabout(self, visitor):
visitor.visit_Exec(self)
@@ -995,28 +1291,42 @@
self.locals = self.locals.mutate_over(visitor)
return visitor.visit_Exec(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~24) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec')
- else:
- if not self.initialization_state & 8:
- self.globals = None
- if not self.initialization_state & 16:
- self.locals = None
- self.body.sync_app_attrs(space)
- if self.globals:
- self.globals.sync_app_attrs(space)
- if self.locals:
- self.locals.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Exec)
+ w_body = self.body.to_object(space) # expr
+ space.setattr(w_node, space.wrap('body'), w_body)
+ w_globals = self.globals.to_object(space) if self.globals is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('globals'), w_globals)
+ w_locals = self.locals.to_object(space) if self.locals is not None else space.w_None # expr
+ space.setattr(w_node, space.wrap('locals'), w_locals)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_body = get_field(space, w_node, 'body', False)
+ w_globals = get_field(space, w_node, 'globals', True)
+ w_locals = get_field(space, w_node, 'locals', True)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _body = expr.from_object(space, w_body)
+ _globals = expr.from_object(space, w_globals)
+ _locals = expr.from_object(space, w_locals)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Exec(_body, _globals, _locals, _lineno, _col_offset)
+
+State.ast_type('Exec', 'stmt', ['body', 'globals', 'locals'])
class Global(stmt):
def __init__(self, names, lineno, col_offset):
self.names = names
- self.w_names = None
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Global(self)
@@ -1024,18 +1334,32 @@
def mutate_over(self, visitor):
return visitor.visit_Global(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Global)
+ if self.names is None:
+ names_w = []
else:
- pass
- w_list = self.w_names
- if w_list is not None:
- list_w = space.listview(w_list)
- if list_w:
- self.names = [space.realstr_w(w_obj) for w_obj in list_w]
- else:
- self.names = None
+ names_w = [space.wrap(node) for node in self.names] # identifier
+ w_names = space.newlist(names_w)
+ space.setattr(w_node, space.wrap('names'), w_names)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_names = get_field(space, w_node, 'names', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ names_w = space.unpackiterable(w_names)
+ _names = [space.realstr_w(w_item) for w_item in names_w]
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Global(_names, _lineno, _col_offset)
+
+State.ast_type('Global', 'stmt', ['names'])
class Expr(stmt):
@@ -1043,7 +1367,6 @@
def __init__(self, value, lineno, col_offset):
self.value = value
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 7
def walkabout(self, visitor):
visitor.visit_Expr(self)
@@ -1052,19 +1375,33 @@
self.value = self.value.mutate_over(visitor)
return visitor.visit_Expr(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 7:
- self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr')
- else:
- pass
- self.value.sync_app_attrs(space)
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Expr)
+ w_value = self.value.to_object(space) # expr
+ space.setattr(w_node, space.wrap('value'), w_value)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_value = get_field(space, w_node, 'value', False)
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _value = expr.from_object(space, w_value)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Expr(_value, _lineno, _col_offset)
+
+State.ast_type('Expr', 'stmt', ['value'])
class Pass(stmt):
def __init__(self, lineno, col_offset):
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 3
def walkabout(self, visitor):
visitor.visit_Pass(self)
@@ -1072,18 +1409,29 @@
def mutate_over(self, visitor):
return visitor.visit_Pass(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset'], 'Pass')
- else:
- pass
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Pass)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Pass(_lineno, _col_offset)
+
+State.ast_type('Pass', 'stmt', [])
class Break(stmt):
def __init__(self, lineno, col_offset):
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 3
def walkabout(self, visitor):
visitor.visit_Break(self)
@@ -1091,18 +1439,29 @@
def mutate_over(self, visitor):
return visitor.visit_Break(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset'], 'Break')
- else:
- pass
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Break)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Break(_lineno, _col_offset)
+
+State.ast_type('Break', 'stmt', [])
class Continue(stmt):
def __init__(self, lineno, col_offset):
stmt.__init__(self, lineno, col_offset)
- self.initialization_state = 3
def walkabout(self, visitor):
visitor.visit_Continue(self)
@@ -1110,11 +1469,23 @@
def mutate_over(self, visitor):
return visitor.visit_Continue(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 3:
- self.missing_field(space, ['lineno', 'col_offset'], 'Continue')
- else:
- pass
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_Continue)
+ w_lineno = space.wrap(self.lineno) # int
+ space.setattr(w_node, space.wrap('lineno'), w_lineno)
+ w_col_offset = space.wrap(self.col_offset) # int
+ space.setattr(w_node, space.wrap('col_offset'), w_col_offset)
+ return w_node
+
+ @staticmethod
+ def from_object(space, w_node):
+ w_lineno = get_field(space, w_node, 'lineno', False)
+ w_col_offset = get_field(space, w_node, 'col_offset', False)
+ _lineno = space.int_w(w_lineno)
+ _col_offset = space.int_w(w_col_offset)
+ return Continue(_lineno, _col_offset)
+
+State.ast_type('Continue', 'stmt', [])
class expr(AST):
@@ -1123,14 +1494,66 @@
self.lineno = lineno
self.col_offset = col_offset
+ @staticmethod
+ def from_object(space, w_node):
+ if space.is_w(w_node, space.w_None):
+ return None
+ if space.isinstance_w(w_node, get(space).w_BoolOp):
+ return BoolOp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_BinOp):
+ return BinOp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_UnaryOp):
+ return UnaryOp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Lambda):
+ return Lambda.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_IfExp):
+ return IfExp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Dict):
+ return Dict.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Set):
+ return Set.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_ListComp):
+ return ListComp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_SetComp):
+ return SetComp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_DictComp):
+ return DictComp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_GeneratorExp):
+ return GeneratorExp.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Yield):
+ return Yield.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Compare):
+ return Compare.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Call):
+ return Call.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Repr):
+ return Repr.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Num):
+ return Num.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Str):
+ return Str.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Attribute):
+ return Attribute.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Subscript):
+ return Subscript.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Name):
+ return Name.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_List):
+ return List.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Tuple):
+ return Tuple.from_object(space, w_node)
+ if space.isinstance_w(w_node, get(space).w_Const):
+ return Const.from_object(space, w_node)
+ raise oefmt(space.w_TypeError,
+ "Expected expr node, got %T", w_node)
+State.ast_type('expr', 'AST', None, ['lineno', 'col_offset'])
+
class BoolOp(expr):
def __init__(self, op, values, lineno, col_offset):
self.op = op
self.values = values
- self.w_values = None
expr.__init__(self, lineno, col_offset)
- self.initialization_state = 15
def walkabout(self, visitor):
visitor.visit_BoolOp(self)
@@ -1140,21 +1563,36 @@
visitor._mutate_sequence(self.values)
return visitor.visit_BoolOp(self)
- def sync_app_attrs(self, space):
- if (self.initialization_state & ~0) ^ 15:
- self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp')
+ def to_object(self, space):
+ w_node = space.call_function(get(space).w_BoolOp)
+ w_op = boolop_to_class[self.op - 1]().to_object(space) # boolop
+ space.setattr(w_node, space.wrap('op'), w_op)
+ if self.values is None:
+ values_w = []
else:
From noreply at buildbot.pypy.org Mon Aug 18 16:55:25 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 16:55:25 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix
Message-ID: <20140818145525.7E2AB1C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72890:eb63f04c42a5
Date: 2014-08-18 16:54 +0200
http://bitbucket.org/pypy/pypy/changeset/eb63f04c42a5/
Log: Fix
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py
--- a/rpython/jit/backend/x86/callbuilder.py
+++ b/rpython/jit/backend/x86/callbuilder.py
@@ -491,6 +491,7 @@
self.mc.MOV_rs(eax.value, 0)
def call_stm_before_ex_call(self):
+ from rpython.rlib import rstm
# XXX slowish: before any CALL_RELEASE_GIL, invoke the
# pypy_stm_commit_if_not_atomic() function. Messy because
# we need to save the register arguments first.
@@ -519,6 +520,7 @@
self.mc.POP_r(self.ARGUMENTS_GPR[i].value)
def call_stm_after_ex_call(self):
+ from rpython.rlib import rstm
# after any CALL_RELEASE_GIL, invoke the
# pypy_stm_start_if_not_atomic() function
self.save_result_value_reacq()
From noreply at buildbot.pypy.org Mon Aug 18 17:04:23 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 17:04:23 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Silence C warnings
Message-ID: <20140818150423.069871C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72891:c872c9937900
Date: 2014-08-18 17:03 +0200
http://bitbucket.org/pypy/pypy/changeset/c872c9937900/
Log: Silence C warnings
diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h
--- a/rpython/translator/c/src/threadlocal.h
+++ b/rpython/translator/c/src/threadlocal.h
@@ -24,10 +24,15 @@
#ifdef USE___THREAD
-#define RPyThreadStaticTLS __thread long
+#ifdef RPY_STM
+# define RPY_THREAD_LOCAL_TYPE pypy_object0_t *
+#else
+# define RPY_THREAD_LOCAL_TYPE void *
+#endif
+#define RPyThreadStaticTLS __thread RPY_THREAD_LOCAL_TYPE
#define RPyThreadStaticTLS_Create(tls) (void)0
#define RPyThreadStaticTLS_Get(tls) tls
-#define RPyThreadStaticTLS_Set(tls, value) tls = (long)value
+#define RPyThreadStaticTLS_Set(tls, value) tls = (RPY_THREAD_LOCAL_TYPE)value
#define OP_THREADLOCALREF_GETADDR(tlref, ptr) ptr = tlref
#endif
From noreply at buildbot.pypy.org Mon Aug 18 17:48:34 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Mon, 18 Aug 2014 17:48:34 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: fix. we now need to turn
inevitable before all frees
Message-ID: <20140818154834.D5B0F1D2AE7@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7-rewindjmp
Changeset: r72892:9c727ae67f53
Date: 2014-08-18 17:48 +0200
http://bitbucket.org/pypy/pypy/changeset/9c727ae67f53/
Log: fix. we now need to turn inevitable before all frees
diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py
--- a/rpython/translator/stm/inevitable.py
+++ b/rpython/translator/stm/inevitable.py
@@ -37,7 +37,7 @@
# ____________________________________________________________
-def should_turn_inevitable_getter_setter(op, fresh_mallocs):
+def should_turn_inevitable_getter_setter(op):
# Getters and setters are allowed if their first argument is a GC pointer.
# If it is a RAW pointer, and it is a read from a non-immutable place,
# and it doesn't use the hint 'stm_dont_track_raw_accesses', then they
@@ -52,7 +52,7 @@
return False
if S._hints.get('stm_dont_track_raw_accesses', False):
return False
- return not fresh_mallocs.is_fresh_malloc(op.args[0])
+ return True
def should_turn_inevitable_call(op):
if op.opname == 'direct_call':
@@ -77,7 +77,7 @@
assert False
-def should_turn_inevitable(op, block, fresh_mallocs):
+def should_turn_inevitable(op, block):
# Always-allowed operations never cause a 'turn inevitable'
if op.opname in ALWAYS_ALLOW_OPERATIONS:
return False
@@ -86,22 +86,17 @@
if op.opname in GETTERS:
if op.result.concretetype is lltype.Void:
return False
- return should_turn_inevitable_getter_setter(op, fresh_mallocs)
+ return should_turn_inevitable_getter_setter(op)
if op.opname in SETTERS:
if op.args[-1].concretetype is lltype.Void:
return False
- return should_turn_inevitable_getter_setter(op, fresh_mallocs)
+ return should_turn_inevitable_getter_setter(op)
#
# Mallocs & Frees
if op.opname in MALLOCS:
return False
if op.opname in FREES:
- # We can only run a CFG in non-inevitable mode from start
- # to end in one transaction (every free gets called once
- # for every fresh malloc). No need to turn inevitable.
- # If the transaction is splitted, the remaining parts of the
- # CFG will always run in inevitable mode anyways.
- return not fresh_mallocs.is_fresh_malloc(op.args[0])
+ return True
#
# Function calls
if op.opname == 'direct_call' or op.opname == 'indirect_call':
@@ -117,12 +112,10 @@
varoftype(lltype.Void))
def insert_turn_inevitable(graph):
- from rpython.translator.backendopt.writeanalyze import FreshMallocs
- fresh_mallocs = FreshMallocs(graph)
for block in graph.iterblocks():
for i in range(len(block.operations)-1, -1, -1):
op = block.operations[i]
- inev = should_turn_inevitable(op, block, fresh_mallocs)
+ inev = should_turn_inevitable(op, block)
if inev:
if not isinstance(inev, str):
inev = op.opname
diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py
--- a/rpython/translator/stm/test/test_inevitable.py
+++ b/rpython/translator/stm/test/test_inevitable.py
@@ -120,7 +120,7 @@
lltype.free(p, flavor='raw')
res = self.interpret_inevitable(f1, [])
- assert res is None
+ assert res == 'free'
def test_raw_malloc_2(self):
X = lltype.Struct('X', ('foo', lltype.Signed))
@@ -130,7 +130,7 @@
llmemory.raw_free(addr)
res = self.interpret_inevitable(f1, [])
- assert res is None
+ assert res == 'raw_free'
def test_unknown_raw_free(self):
X = lltype.Struct('X', ('foo', lltype.Signed))
From noreply at buildbot.pypy.org Mon Aug 18 18:39:47 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 18:39:47 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: I think this operation
should not be placed in the "nosideeffect" group...
Message-ID: <20140818163947.E02391C059C@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72893:5e74e4d23364
Date: 2014-08-18 18:39 +0200
http://bitbucket.org/pypy/pypy/changeset/5e74e4d23364/
Log: I think this operation should not be placed in the "nosideeffect"
group...
diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -495,7 +495,6 @@
'VIRTUAL_REF/2', # removed before it's passed to the backend
'READ_TIMESTAMP/0',
'STM_SHOULD_BREAK_TRANSACTION/0',
- 'STM_HINT_COMMIT_SOON/0',
'MARK_OPAQUE_PTR/1b',
# this one has no *visible* side effect, since the virtualizable
# must be forced, however we need to execute it anyway
@@ -522,6 +521,7 @@
'RECORD_KNOWN_CLASS/2', # [objptr, clsptr]
'KEEPALIVE/1',
'STM_READ/1',
+ 'STM_HINT_COMMIT_SOON/0',
'_CANRAISE_FIRST', # ----- start of can_raise operations -----
'_CALL_FIRST',
From noreply at buildbot.pypy.org Mon Aug 18 19:54:56 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 19:54:56 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Ready for merge
Message-ID: <20140818175456.507B91D2AC1@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7-rewindjmp
Changeset: r72894:8fbadacba2a7
Date: 2014-08-18 19:52 +0200
http://bitbucket.org/pypy/pypy/changeset/8fbadacba2a7/
Log: Ready for merge
From noreply at buildbot.pypy.org Mon Aug 18 19:55:02 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 19:55:02 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: Merge branch stmgc-c7-rewindjmp
Message-ID: <20140818175502.242111D2AC1@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7
Changeset: r72895:baea23f84952
Date: 2014-08-18 19:54 +0200
http://bitbucket.org/pypy/pypy/changeset/baea23f84952/
Log: Merge branch stmgc-c7-rewindjmp
Transactions can now continue even across any number of function
returns without becoming inevitable.
diff too long, truncating to 2000 out of 19427 lines
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
#
-__version__ = '2.2.4.dev2'
+__version__ = '2.5.2'
diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_argcomplete.py
@@ -0,0 +1,104 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code.
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn"
+instead of the default "dirname ":
+
+ optparser.add_argument(Config._file_or_dir, nargs='*'
+ ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+ # PYTHON_ARGCOMPLETE_OK
+so the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+ # PYTHON_ARGCOMPLETE_OK
+ near the top of the main python entry point
+- include in the file calling parse_args():
+ from _argcomplete import try_argcomplete, filescompleter
+ , call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+ completers):
+ export _ARC_DEBUG=1
+- run:
+ python-argcomplete-check-easy-install-script $(which appname)
+ echo $?
+ will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+ _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+ which should throw a KeyError: 'COMPLINE' (which is properly set by the
+ global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+ 'Fast file completer class'
+ def __init__(self, directories=True):
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ """only called on non option completions"""
+ if os.path.sep in prefix[1:]: #
+ prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+ else:
+ prefix_dir = 0
+ completion = []
+ globbed = []
+ if '*' not in prefix and '?' not in prefix:
+ if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash
+ globbed.extend(glob(prefix + '.*'))
+ prefix += '*'
+ globbed.extend(glob(prefix))
+ for x in sorted(globbed):
+ if os.path.isdir(x):
+ x += '/'
+ # append stripping the prefix (like bash, not like compgen)
+ completion.append(x[prefix_dir:])
+ return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+ # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format
+ if sys.version_info[:2] < (2, 6):
+ sys.exit(1)
+ try:
+ import argcomplete.completers
+ except ImportError:
+ sys.exit(-1)
+ filescompleter = FastFilesCompleter()
+
+ def try_argcomplete(parser):
+ argcomplete.autocomplete(parser)
+else:
+ def try_argcomplete(parser): pass
+ filescompleter = None
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -3,7 +3,6 @@
"""
import py
import sys
-import pytest
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
@@ -19,8 +18,8 @@
to provide assert expression information. """)
group.addoption('--no-assert', action="store_true", default=False,
dest="noassert", help="DEPRECATED equivalent to --assert=plain")
- group.addoption('--nomagic', action="store_true", default=False,
- dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
+ group.addoption('--nomagic', '--no-magic', action="store_true",
+ default=False, help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
@@ -35,22 +34,25 @@
mode = "plain"
if mode == "rewrite":
try:
- import ast
+ import ast # noqa
except ImportError:
mode = "reinterp"
else:
- if sys.platform.startswith('java'):
+ # Both Jython and CPython 2.6.0 have AST bugs that make the
+ # assertion rewriting hook malfunction.
+ if (sys.platform.startswith('java') or
+ sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
- reinterpret.AssertionError)
+ reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
- hook = rewrite.AssertionRewritingHook()
- sys.meta_path.append(hook)
+ hook = rewrite.AssertionRewritingHook() # noqa
+ sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
@@ -73,9 +75,16 @@
def callbinrepr(op, left, right):
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
+
for new_expl in hook_result:
if new_expl:
- res = '\n~'.join(new_expl)
+ # Don't include pageloads of data unless we are very
+ # verbose (-vv)
+ if (sum(len(p) for p in new_expl[1:]) > 80*8
+ and item.config.option.verbose < 2):
+ new_expl[1:] = [py.builtin._totext(
+ 'Detailed information truncated, use "-vv" to show')]
+ res = py.builtin._totext('\n~').join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
# The result will be fed back a python % formatting
# operation, which will fail if there are extraneous
@@ -95,9 +104,9 @@
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
- from _pytest.assertion import reinterpret
+ from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
- from _pytest.assertion import rewrite
+ from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -11,7 +11,7 @@
from _pytest.assertion.reinterpret import BuiltinAssertionError
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+if sys.platform.startswith("java"):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -526,10 +526,13 @@
# example:
def f():
return 5
+
def g():
return 3
+
def h(x):
return 'never'
+
check("f() * g() == 5")
check("not f()")
check("not (f() and g() or 0)")
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,18 +1,26 @@
import sys
import py
from _pytest.assertion.util import BuiltinAssertionError
+u = py.builtin._totext
+
class AssertionError(BuiltinAssertionError):
def __init__(self, *args):
BuiltinAssertionError.__init__(self, *args)
if args:
+ # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+ # on Python2.7 and above we always get len(args) == 1
+ # with args[0] being the (x,y) tuple.
+ if len(args) > 1:
+ toprint = args
+ else:
+ toprint = args[0]
try:
- self.msg = str(args[0])
- except py.builtin._sysex:
- raise
- except:
- self.msg = "<[broken __repr__] %s at %0xd>" %(
- args[0].__class__, id(args[0]))
+ self.msg = u(toprint)
+ except Exception:
+ self.msg = u(
+ "<[broken __repr__] %s at %0xd>"
+ % (toprint.__class__, id(toprint)))
else:
f = py.code.Frame(sys._getframe(1))
try:
@@ -44,4 +52,3 @@
from _pytest.assertion.newinterpret import interpret as reinterpret
else:
reinterpret = reinterpret_old
-
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -6,6 +6,7 @@
import imp
import marshal
import os
+import re
import struct
import sys
import types
@@ -14,13 +15,7 @@
from _pytest.assertion import util
-# Windows gives ENOENT in places *nix gives ENOTDIR.
-if sys.platform.startswith("win"):
- PATH_COMPONENT_NOT_DIR = errno.ENOENT
-else:
- PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
-
-# py.test caches rewritten pycs in __pycache__.
+# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
@@ -34,17 +29,19 @@
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
-PYC_EXT = ".py" + "c" if __debug__ else "o"
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertionRewritingHook(object):
- """Import hook which rewrites asserts."""
+ """PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.session = None
self.modules = {}
+ self._register_with_pkg_resources()
def set_session(self, session):
self.fnpats = session.config.getini("python_files")
@@ -59,8 +56,12 @@
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
- if path is not None and len(path) == 1:
- pth = path[0]
+ if path is not None:
+ # Starting with Python 3.3, path is a _NamespacePath(), which
+ # causes problems if not converted to list.
+ path = list(path)
+ if len(path) == 1:
+ pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
@@ -95,12 +96,13 @@
finally:
self.session = sess
else:
- state.trace("matched test file (was specified on cmdline): %r" % (fn,))
+ state.trace("matched test file (was specified on cmdline): %r" %
+ (fn,))
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
- # concurrent py.test processes rewriting and loading pycs. To avoid
+ # concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
@@ -116,19 +118,19 @@
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
- elif e == PATH_COMPONENT_NOT_DIR:
+ elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e == errno.EACCES:
- state.trace("read only directory: %r" % (fn_pypath.dirname,))
+ state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
- # Notice that even if we're in a read-only directory, I'm going to check
- # for a cached pyc. This may not be optimal...
+ # Notice that even if we're in a read-only directory, I'm going
+ # to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
state.trace("rewriting %r" % (fn,))
@@ -153,27 +155,59 @@
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
+ mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
del sys.modules[name]
raise
return sys.modules[name]
-def _write_pyc(co, source_path, pyc):
- # Technically, we don't have to have the same pyc format as (C)Python, since
- # these "pycs" should never be seen by builtin import. However, there's
- # little reason deviate, and I hope sometime to be able to use
- # imp.load_compiled to load them. (See the comment in load_module above.)
+
+
+ def is_package(self, name):
+ try:
+ fd, fn, desc = imp.find_module(name)
+ except ImportError:
+ return False
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ return tp == imp.PKG_DIRECTORY
+
+ @classmethod
+ def _register_with_pkg_resources(cls):
+ """
+ Ensure package resources can be loaded from this loader. May be called
+ multiple times, as the operation is idempotent.
+ """
+ try:
+ import pkg_resources
+ # access an attribute in case a deferred importer is present
+ pkg_resources.__name__
+ except ImportError:
+ return
+
+ # Since pytest tests are always located in the file system, the
+ # DefaultProvider is appropriate.
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+
+def _write_pyc(state, co, source_path, pyc):
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+ # import. However, there's little reason deviate, and I hope
+ # sometime to be able to use imp.load_compiled to load them. (See
+ # the comment in load_module above.)
mtime = int(source_path.mtime())
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
- if err == PATH_COMPONENT_NOT_DIR:
- # This happens when we get a EEXIST in find_module creating the
- # __pycache__ directory and __pycache__ is by some non-dir node.
- return False
- raise
+ state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, __pycache__ being a
+ # file etc.
+ return False
try:
fp.write(imp.get_magic())
fp.write(struct.pack(">",
- ast.Add : "+",
- ast.Sub : "-",
- ast.Mult : "*",
- ast.Div : "/",
- ast.FloorDiv : "//",
- ast.Mod : "%",
- ast.Eq : "==",
- ast.NotEq : "!=",
- ast.Lt : "<",
- ast.LtE : "<=",
- ast.Gt : ">",
- ast.GtE : ">=",
- ast.Pow : "**",
- ast.Is : "is",
- ast.IsNot : "is not",
- ast.In : "in",
- ast.NotIn : "not in"
+ ast.BitOr: "|",
+ ast.BitXor: "^",
+ ast.BitAnd: "&",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.FloorDiv: "//",
+ ast.Mod: "%%", # escaped for string formatting
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.Lt: "<",
+ ast.LtE: "<=",
+ ast.Gt: ">",
+ ast.GtE: ">=",
+ ast.Pow: "**",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in"
}
@@ -341,7 +408,7 @@
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
- isinstance(item.value, ast.Str)):
+ isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
@@ -462,7 +529,8 @@
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
- variables = [ast.Name(name, ast.Store()) for name in self.variables]
+ variables = [ast.Name(name, ast.Store())
+ for name in self.variables]
clear = ast.Assign(variables, ast.Name("None", ast.Load()))
self.statements.append(clear)
# Fix line numbers.
@@ -471,11 +539,12 @@
return self.statements
def visit_Name(self, name):
- # Check if the name is local or not.
+ # Display the repr of the name if it's a local variable or
+ # _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [], None, None)
- globs = ast.Call(self.builtin("globals"), [], [], None, None)
- ops = [ast.In(), ast.IsNot()]
- test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+ inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+ dorepr = self.helper("should_repr_global_name", name)
+ test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
@@ -492,7 +561,8 @@
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
- self.on_failure.append(ast.If(cond, fail_inner, []))
+ # cond is set in a prior loop iteration below
+ self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
@@ -548,7 +618,8 @@
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+ new_call = ast.Call(new_func, new_args, new_kwargs,
+ new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
@@ -584,7 +655,7 @@
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
- # Use py.code._reprcompare if that's available.
+ # Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -1,8 +1,13 @@
"""Utilities for assertion debugging"""
import py
+try:
+ from collections import Sequence
+except ImportError:
+ Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
@@ -10,6 +15,7 @@
# DebugInterpreter.
_reprcompare = None
+
def format_explanation(explanation):
"""This formats an explanation
@@ -20,7 +26,18 @@
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
- # simplify 'assert False where False = ...'
+ explanation = _collapse_false(explanation)
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+ """Collapse expansions of False
+
+ So this strips out any "assert False\n{where False = ...\n}"
+ blocks.
+ """
where = 0
while True:
start = where = explanation.find("False\n{False = ", where)
@@ -42,28 +59,48 @@
explanation = (explanation[:start] + explanation[start+15:end-1] +
explanation[end+1:])
where -= 17
- raw_lines = (explanation or '').split('\n')
- # escape newlines not followed by {, } and ~
+ return explanation
+
+
+def _split_explanation(explanation):
+ """Return a list of individual lines in the explanation
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
+ return lines
+
+def _format_lines(lines):
+ """Format the individual lines
+
+ This will replace the '{', '}' and '~' characters of our mini
+ formatting language with the proper 'where ...', 'and ...' and ' +
+ ...' text, taking care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
- s = 'and '
+ s = u('and ')
else:
- s = 'where '
+ s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
- result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
@@ -71,9 +108,9 @@
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
- result.append(' '*len(stack) + line[1:])
+ result.append(u(' ')*len(stack) + line[1:])
assert len(stack) == 1
- return '\n'.join(result)
+ return result
# Provide basestring in python3
@@ -83,132 +120,163 @@
basestring = str
-def assertrepr_compare(op, left, right):
- """return specialised explanations for some operators/operands"""
- width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+def assertrepr_compare(config, op, left, right):
+ """Return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width/2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
- summary = '%s %s %s' % (left_repr, op, right_repr)
+ summary = u('%s %s %s') % (left_repr, op, right_repr)
- issequence = lambda x: isinstance(x, (list, tuple))
+ issequence = lambda x: (isinstance(x, (list, tuple, Sequence))
+ and not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
- isset = lambda x: isinstance(x, set)
+ isset = lambda x: isinstance(x, (set, frozenset))
+ verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
- explanation = _diff_text(left, right)
+ explanation = _diff_text(left, right, verbose)
elif issequence(left) and issequence(right):
- explanation = _compare_eq_sequence(left, right)
+ explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
- explanation = _compare_eq_set(left, right)
+ explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
- explanation = _diff_text(py.std.pprint.pformat(left),
- py.std.pprint.pformat(right))
+ explanation = _compare_eq_dict(left, right, verbose)
elif op == 'not in':
if istext(left) and istext(right):
- explanation = _notin_text(left, right)
- except py.builtin._sysex:
- raise
- except:
+ explanation = _notin_text(left, right, verbose)
+ except Exception:
excinfo = py.code.ExceptionInfo()
- explanation = ['(pytest_assertion plugin: representation of '
- 'details failed. Probably an object has a faulty __repr__.)',
- str(excinfo)
- ]
-
+ explanation = [
+ u('(pytest_assertion plugin: representation of details failed. '
+ 'Probably an object has a faulty __repr__.)'),
+ u(excinfo)]
if not explanation:
return None
- # Don't include pageloads of data, should be configurable
- if len(''.join(explanation)) > 80*8:
- explanation = ['Detailed information too verbose, truncated']
-
return [summary] + explanation
-def _diff_text(left, right):
- """Return the explanation for the diff between text
+def _diff_text(left, right, verbose=False):
+ """Return the explanation for the diff between text or bytes
- This will skip leading and trailing characters which are
- identical to keep the diff minimal.
+ Unless --verbose is used this will skip leading and trailing
+ characters which are identical to keep the diff minimal.
+
+ If the input are bytes they will be safely converted to text.
"""
explanation = []
- i = 0 # just in case left or right has zero length
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation = ['Skipping %s identical '
- 'leading characters in diff' % i]
- left = left[i:]
- right = right[i:]
- if len(left) == len(right):
- for i in range(len(left)):
- if left[-i] != right[-i]:
+ if isinstance(left, py.builtin.bytes):
+ left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+ if isinstance(right, py.builtin.bytes):
+ right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+ if not verbose:
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
break
if i > 42:
- i -= 10 # Provide some context
- explanation += ['Skipping %s identical '
- 'trailing characters in diff' % i]
- left = left[:-i]
- right = right[:-i]
+ i -= 10 # Provide some context
+ explanation = [u('Skipping %s identical leading '
+ 'characters in diff, use -v to show') % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += [u('Skipping %s identical trailing '
+ 'characters in diff, use -v to show') % i]
+ left = left[:-i]
+ right = right[:-i]
explanation += [line.strip('\n')
for line in py.std.difflib.ndiff(left.splitlines(),
right.splitlines())]
return explanation
-def _compare_eq_sequence(left, right):
+def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
- explanation += ['At index %s diff: %r != %r' %
- (i, left[i], right[i])]
+ explanation += [u('At index %s diff: %r != %r')
+ % (i, left[i], right[i])]
break
if len(left) > len(right):
- explanation += ['Left contains more items, '
- 'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+ explanation += [u('Left contains more items, first extra item: %s')
+ % py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
- explanation += ['Right contains more items, '
- 'first extra item: %s' % py.io.saferepr(right[len(left)],)]
- return explanation # + _diff_text(py.std.pprint.pformat(left),
- # py.std.pprint.pformat(right))
+ explanation += [
+ u('Right contains more items, first extra item: %s') %
+ py.io.saferepr(right[len(left)],)]
+ return explanation # + _diff_text(py.std.pprint.pformat(left),
+ # py.std.pprint.pformat(right))
-def _compare_eq_set(left, right):
+def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
- explanation.append('Extra items in the left set:')
+ explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
- explanation.append('Extra items in the right set:')
+ explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
-def _notin_text(term, text):
+def _compare_eq_dict(left, right, verbose=False):
+ explanation = []
+ common = set(left).intersection(set(right))
+ same = dict((k, left[k]) for k in common if left[k] == right[k])
+ if same and not verbose:
+ explanation += [u('Omitting %s identical items, use -v to show') %
+ len(same)]
+ elif same:
+ explanation += [u('Common items:')]
+ explanation += py.std.pprint.pformat(same).splitlines()
+ diff = set(k for k in common if left[k] != right[k])
+ if diff:
+ explanation += [u('Differing items:')]
+ for k in diff:
+ explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+ py.io.saferepr({k: right[k]})]
+ extra_left = set(left) - set(right)
+ if extra_left:
+ explanation.append(u('Left contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, left[k]) for k in extra_left)).splitlines())
+ extra_right = set(right) - set(left)
+ if extra_right:
+ explanation.append(u('Right contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, right[k]) for k in extra_right)).splitlines())
+ return explanation
+
+
+def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
- diff = _diff_text(correct_text, text)
- newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+ diff = _diff_text(correct_text, text, verbose)
+ newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
- if line.startswith('Skipping'):
+ if line.startswith(u('Skipping')):
continue
- if line.startswith('- '):
+ if line.startswith(u('- ')):
continue
- if line.startswith('+ '):
- newdiff.append(' ' + line[2:])
+ if line.startswith(u('+ ')):
+ newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -1,43 +1,114 @@
-""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """
+"""
+ per-test stdout/stderr capturing mechanisms,
+ ``capsys`` and ``capfd`` function arguments.
+"""
+# note: py.io capture was where copied from
+# pylib 1.4.20.dev2 (rev 13d9af95547e)
+import sys
+import os
+import tempfile
-import pytest, py
-import os
+import py
+import pytest
+
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ class BytesIO(StringIO):
+ def write(self, data):
+ if isinstance(data, unicode):
+ raise TypeError("not a byte value: %r" % (data,))
+ StringIO.write(self, data)
+
+if sys.version_info < (3, 0):
+ class TextIO(StringIO):
+ def write(self, data):
+ if not isinstance(data, unicode):
+ enc = getattr(self, '_encoding', 'UTF-8')
+ data = unicode(data, enc, 'replace')
+ StringIO.write(self, data)
+else:
+ TextIO = StringIO
+
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
def pytest_addoption(parser):
group = parser.getgroup("general")
- group._addoption('--capture', action="store", default=None,
- metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+ group._addoption(
+ '--capture', action="store", default=None,
+ metavar="method", choices=['fd', 'sys', 'no'],
help="per-test capturing method: one of fd (default)|sys|no.")
- group._addoption('-s', action="store_const", const="no", dest="capture",
+ group._addoption(
+ '-s', action="store_const", const="no", dest="capture",
help="shortcut for --capture=no.")
+
@pytest.mark.tryfirst
-def pytest_cmdline_parse(pluginmanager, args):
- # we want to perform capturing already for plugin/conftest loading
- if '-s' in args or "--capture=no" in args:
- method = "no"
- elif hasattr(os, 'dup') and '--capture=sys' not in args:
+def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
+ ns = parser.parse_known_args(args)
+ method = ns.capture
+ if not method:
method = "fd"
- else:
+ if method == "fd" and not hasattr(os, "dup"):
method = "sys"
capman = CaptureManager(method)
- pluginmanager.register(capman, "capturemanager")
+ early_config.pluginmanager.register(capman, "capturemanager")
+
+ # make sure that capturemanager is properly reset at final shutdown
+ def teardown():
+ try:
+ capman.reset_capturings()
+ except ValueError:
+ pass
+
+ early_config.pluginmanager.add_shutdown(teardown)
+
+ # make sure logging does not raise exceptions at the end
+ def silence_logging_at_shutdown():
+ if "logging" in sys.modules:
+ sys.modules["logging"].raiseExceptions = False
+ early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown)
+
+ # finally trigger conftest loading but while capturing (issue93)
+ capman.resumecapture()
+ try:
+ try:
+ return __multicall__.execute()
+ finally:
+ out, err = capman.suspendcapture()
+ except:
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+ raise
+
def addouterr(rep, outerr):
for secname, content in zip(["out", "err"], outerr):
if content:
rep.sections.append(("Captured std%s" % secname, content))
+
class NoCapture:
def startall(self):
pass
+
def resume(self):
pass
+
def reset(self):
pass
+
def suspend(self):
return "", ""
+
class CaptureManager:
def __init__(self, defaultmethod=None):
self._method2capture = {}
@@ -45,21 +116,23 @@
def _maketempfile(self):
f = py.std.tempfile.TemporaryFile()
- newf = py.io.dupfile(f, encoding="UTF-8")
+ newf = dupfile(f, encoding="UTF-8")
f.close()
return newf
def _makestringio(self):
- return py.io.TextIO()
+ return TextIO()
def _getcapture(self, method):
if method == "fd":
- return py.io.StdCaptureFD(now=False,
- out=self._maketempfile(), err=self._maketempfile()
+ return StdCaptureFD(
+ out=self._maketempfile(),
+ err=self._maketempfile(),
)
elif method == "sys":
- return py.io.StdCapture(now=False,
- out=self._makestringio(), err=self._makestringio()
+ return StdCapture(
+ out=self._makestringio(),
+ err=self._makestringio(),
)
elif method == "no":
return NoCapture()
@@ -74,23 +147,24 @@
method = config._conftest.rget("option_capture", path=fspath)
except KeyError:
method = "fd"
- if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
+ if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
method = "sys"
return method
def reset_capturings(self):
- for name, cap in self._method2capture.items():
+ for cap in self._method2capture.values():
cap.reset()
def resumecapture_item(self, item):
method = self._getmethod(item.config, item.fspath)
if not hasattr(item, 'outerr'):
- item.outerr = ('', '') # we accumulate outerr on the item
+ item.outerr = ('', '') # we accumulate outerr on the item
return self.resumecapture(method)
def resumecapture(self, method=None):
if hasattr(self, '_capturing'):
- raise ValueError("cannot resume, already capturing with %r" %
+ raise ValueError(
+ "cannot resume, already capturing with %r" %
(self._capturing,))
if method is None:
method = self._defaultmethod
@@ -119,30 +193,29 @@
return "", ""
def activate_funcargs(self, pyfuncitem):
- if not hasattr(pyfuncitem, 'funcargs'):
- return
- assert not hasattr(self, '_capturing_funcargs')
- self._capturing_funcargs = capturing_funcargs = []
- for name, capfuncarg in pyfuncitem.funcargs.items():
- if name in ('capsys', 'capfd'):
- capturing_funcargs.append(capfuncarg)
- capfuncarg._start()
+ funcargs = getattr(pyfuncitem, "funcargs", None)
+ if funcargs is not None:
+ for name, capfuncarg in funcargs.items():
+ if name in ('capsys', 'capfd'):
+ assert not hasattr(self, '_capturing_funcarg')
+ self._capturing_funcarg = capfuncarg
+ capfuncarg._start()
def deactivate_funcargs(self):
- capturing_funcargs = getattr(self, '_capturing_funcargs', None)
- if capturing_funcargs is not None:
- while capturing_funcargs:
- capfuncarg = capturing_funcargs.pop()
- capfuncarg._finalize()
- del self._capturing_funcargs
+ capturing_funcarg = getattr(self, '_capturing_funcarg', None)
+ if capturing_funcarg:
+ outerr = capturing_funcarg._finalize()
+ del self._capturing_funcarg
+ return outerr
def pytest_make_collect_report(self, __multicall__, collector):
method = self._getmethod(collector.config, collector.fspath)
try:
self.resumecapture(method)
except ValueError:
- return # recursive collect, XXX refactor capturing
- # to allow for more lightweight recursive capturing
+ # recursive collect, XXX refactor capturing
+ # to allow for more lightweight recursive capturing
+ return
try:
rep = __multicall__.execute()
finally:
@@ -169,46 +242,371 @@
@pytest.mark.tryfirst
def pytest_runtest_makereport(self, __multicall__, item, call):
- self.deactivate_funcargs()
+ funcarg_outerr = self.deactivate_funcargs()
rep = __multicall__.execute()
outerr = self.suspendcapture(item)
- if not rep.passed:
- addouterr(rep, outerr)
+ if funcarg_outerr is not None:
+ outerr = (outerr[0] + funcarg_outerr[0],
+ outerr[1] + funcarg_outerr[1])
+ addouterr(rep, outerr)
if not rep.passed or rep.when == "teardown":
outerr = ('', '')
item.outerr = outerr
return rep
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
def pytest_funcarg__capsys(request):
"""enables capturing of writes to sys.stdout/sys.stderr and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
- return CaptureFuncarg(py.io.StdCapture)
+ if "capfd" in request._funcargs:
+ raise request.raiseerror(error_capsysfderror)
+ return CaptureFixture(StdCapture)
+
def pytest_funcarg__capfd(request):
"""enables capturing of writes to file descriptors 1 and 2 and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
+ if "capsys" in request._funcargs:
+ request.raiseerror(error_capsysfderror)
if not hasattr(os, 'dup'):
- py.test.skip("capfd funcarg needs os.dup")
- return CaptureFuncarg(py.io.StdCaptureFD)
+ pytest.skip("capfd funcarg needs os.dup")
+ return CaptureFixture(StdCaptureFD)
-class CaptureFuncarg:
+
+class CaptureFixture:
def __init__(self, captureclass):
- self.capture = captureclass(now=False)
+ self._capture = captureclass()
def _start(self):
- self.capture.startall()
+ self._capture.startall()
def _finalize(self):
- if hasattr(self, 'capture'):
- self.capture.reset()
- del self.capture
+ if hasattr(self, '_capture'):
+ outerr = self._outerr = self._capture.reset()
+ del self._capture
+ return outerr
def readouterr(self):
- return self.capture.readouterr()
+ try:
+ return self._capture.readouterr()
+ except AttributeError:
+ return self._outerr
def close(self):
self._finalize()
+
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None, patchsys=False):
+ """ save targetfd descriptor, and open a new
+ temporary file there. If no tmpfile is
+ specified a tempfile.Tempfile() will be opened
+ in text mode.
+ """
+ self.targetfd = targetfd
+ if tmpfile is None and targetfd != 0:
+ f = tempfile.TemporaryFile('wb+')
+ tmpfile = dupfile(f, encoding="UTF-8")
+ f.close()
+ self.tmpfile = tmpfile
+ self._savefd = os.dup(self.targetfd)
+ if patchsys:
+ self._oldsys = getattr(sys, patchsysdict[targetfd])
+
+ def start(self):
+ try:
+ os.fstat(self._savefd)
+ except OSError:
+ raise ValueError(
+ "saved filedescriptor not valid, "
+ "did you call start() twice?")
+ if self.targetfd == 0 and not self.tmpfile:
+ fd = os.open(os.devnull, os.O_RDONLY)
+ os.dup2(fd, 0)
+ os.close(fd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+ else:
+ os.dup2(self.tmpfile.fileno(), self.targetfd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+ def done(self):
+ """ unpatch and clean up, returns the self.tmpfile (file object)
+ """
+ os.dup2(self._savefd, self.targetfd)
+ os.close(self._savefd)
+ if self.targetfd != 0:
+ self.tmpfile.seek(0)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+ return self.tmpfile
+
+ def writeorg(self, data):
+ """ write a string to the original file descriptor
+ """
+ tempfp = tempfile.TemporaryFile()
+ try:
+ os.dup2(self._savefd, tempfp.fileno())
+ tempfp.write(data)
+ finally:
+ tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+ """ return a new open file object that's a duplicate of f
+
+ mode is duplicated if not given, 'buffering' controls
+ buffer size (defaulting to no buffering) and 'raising'
+ defines whether an exception is raised when an incompatible
+ file object is passed in (if raising is False, the file
+ object itself will be returned)
+ """
+ try:
+ fd = f.fileno()
+ mode = mode or f.mode
+ except AttributeError:
+ if raising:
+ raise
+ return f
+ newfd = os.dup(fd)
+ if sys.version_info >= (3, 0):
+ if encoding is not None:
+ mode = mode.replace("b", "")
+ buffering = True
+ return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+ else:
+ f = os.fdopen(newfd, mode, buffering)
+ if encoding is not None:
+ return EncodedFile(f, encoding)
+ return f
+
+
+class EncodedFile(object):
+ def __init__(self, _stream, encoding):
+ self._stream = _stream
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding)
+ self._stream.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+
+class Capture(object):
+ def reset(self):
+ """ reset sys.stdout/stderr and return captured output as strings. """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already reset")
+ self._reset = True
+ outfile, errfile = self.done(save=False)
+ out, err = "", ""
+ if outfile and not outfile.closed:
+ out = outfile.read()
+ outfile.close()
+ if errfile and errfile != outfile and not errfile.closed:
+ err = errfile.read()
+ errfile.close()
+ return out, err
+
+ def suspend(self):
+ """ return current snapshot captures, memorize tempfiles. """
+ outerr = self.readouterr()
+ outfile, errfile = self.done()
+ return outerr
+
+
+class StdCaptureFD(Capture):
+ """ This class allows to capture writes to FD1 and FD2
+ and may connect a NULL file to FD0 (and prevent
+ reads from sys.stdin). If any of the 0,1,2 file descriptors
+ is invalid it will not be captured.
+ """
+ def __init__(self, out=True, err=True, in_=True, patchsys=True):
+ self._options = {
+ "out": out,
+ "err": err,
+ "in_": in_,
+ "patchsys": patchsys,
+ }
+ self._save()
+
+ def _save(self):
+ in_ = self._options['in_']
+ out = self._options['out']
+ err = self._options['err']
+ patchsys = self._options['patchsys']
+ if in_:
+ try:
+ self.in_ = FDCapture(
+ 0, tmpfile=None,
+ patchsys=patchsys)
+ except OSError:
+ pass
+ if out:
+ tmpfile = None
+ if hasattr(out, 'write'):
+ tmpfile = out
+ try:
+ self.out = FDCapture(
+ 1, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['out'] = self.out.tmpfile
+ except OSError:
+ pass
+ if err:
+ if hasattr(err, 'write'):
+ tmpfile = err
+ else:
+ tmpfile = None
+ try:
+ self.err = FDCapture(
+ 2, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['err'] = self.err.tmpfile
+ except OSError:
+ pass
+
+ def startall(self):
+ if hasattr(self, 'in_'):
+ self.in_.start()
+ if hasattr(self, 'out'):
+ self.out.start()
+ if hasattr(self, 'err'):
+ self.err.start()
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if hasattr(self, 'out') and not self.out.tmpfile.closed:
+ outfile = self.out.done()
+ if hasattr(self, 'err') and not self.err.tmpfile.closed:
+ errfile = self.err.done()
+ if hasattr(self, 'in_'):
+ self.in_.done()
+ if save:
+ self._save()
+ return outfile, errfile
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = self._readsnapshot('out')
+ err = self._readsnapshot('err')
+ return out, err
+
+ def _readsnapshot(self, name):
+ if hasattr(self, name):
+ f = getattr(self, name).tmpfile
+ else:
+ return ''
+
+ f.seek(0)
+ res = f.read()
+ enc = getattr(f, "encoding", None)
+ if enc:
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+
+class StdCapture(Capture):
+ """ This class allows to capture writes to sys.stdout|stderr "in-memory"
+ and will raise errors on tries to read from sys.stdin. It only
+ modifies sys.stdout|stderr|stdin attributes and does not
+ touch underlying File Descriptors (use StdCaptureFD for that).
+ """
+ def __init__(self, out=True, err=True, in_=True):
+ self._oldout = sys.stdout
+ self._olderr = sys.stderr
+ self._oldin = sys.stdin
+ if out and not hasattr(out, 'file'):
+ out = TextIO()
+ self.out = out
+ if err:
+ if not hasattr(err, 'write'):
+ err = TextIO()
+ self.err = err
+ self.in_ = in_
+
+ def startall(self):
+ if self.out:
+ sys.stdout = self.out
+ if self.err:
+ sys.stderr = self.err
+ if self.in_:
+ sys.stdin = self.in_ = DontReadFromInput()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if self.out and not self.out.closed:
+ sys.stdout = self._oldout
+ outfile = self.out
+ outfile.seek(0)
+ if self.err and not self.err.closed:
+ sys.stderr = self._olderr
+ errfile = self.err
+ errfile.seek(0)
+ if self.in_:
+ sys.stdin = self._oldin
+ return outfile, errfile
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = err = ""
+ if self.out:
+ out = self.out.getvalue()
+ self.out.truncate(0)
+ self.out.seek(0)
+ if self.err:
+ err = self.err.getvalue()
+ self.err.truncate(0)
+ self.err.seek(0)
+ return out, err
+
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+
+ def isatty(self):
+ return False
+
+ def close(self):
+ pass
diff --git a/_pytest/config.py b/_pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -1,25 +1,91 @@
""" command line options, ini-file and conftest.py processing. """
import py
+# DON't import pytest here because it causes import cycle troubles
import sys, os
+from _pytest import hookspec # the extension point definitions
from _pytest.core import PluginManager
-import pytest
-def pytest_cmdline_parse(pluginmanager, args):
- config = Config(pluginmanager)
- config.parse(args)
- return config
+# pytest startup
-def pytest_unconfigure(config):
- while 1:
- try:
- fin = config._cleanup.pop()
- except IndexError:
- break
- fin()
+def main(args=None, plugins=None):
+ """ return exit code, after performing an in-process test run.
+
+ :arg args: list of command line arguments.
+
+ :arg plugins: list of plugin objects to be auto-registered during
+ initialization.
+ """
+ config = _prepareconfig(args, plugins)
+ return config.hook.pytest_cmdline_main(config=config)
+
+class cmdline: # compatibility namespace
+ main = staticmethod(main)
+
+class UsageError(Exception):
+ """ error in pytest usage or invocation"""
+
+_preinit = []
+
+default_plugins = (
+ "mark main terminal runner python pdb unittest capture skipping "
+ "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
+ "junitxml resultlog doctest").split()
+
+def _preloadplugins():
+ assert not _preinit
+ _preinit.append(get_plugin_manager())
+
+def get_plugin_manager():
+ if _preinit:
+ return _preinit.pop(0)
+ # subsequent calls to main will create a fresh instance
+ pluginmanager = PytestPluginManager()
+ pluginmanager.config = Config(pluginmanager) # XXX attr needed?
+ for spec in default_plugins:
+ pluginmanager.import_plugin(spec)
+ return pluginmanager
+
+def _prepareconfig(args=None, plugins=None):
+ if args is None:
+ args = sys.argv[1:]
+ elif isinstance(args, py.path.local):
+ args = [str(args)]
+ elif not isinstance(args, (tuple, list)):
+ if not isinstance(args, str):
+ raise ValueError("not a string or argument list: %r" % (args,))
+ args = py.std.shlex.split(args)
+ pluginmanager = get_plugin_manager()
+ if plugins:
+ for plugin in plugins:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+
+class PytestPluginManager(PluginManager):
+ def __init__(self, hookspecs=[hookspec]):
+ super(PytestPluginManager, self).__init__(hookspecs=hookspecs)
+ self.register(self)
+ if os.environ.get('PYTEST_DEBUG'):
+ err = sys.stderr
+ encoding = getattr(err, 'encoding', 'utf8')
+ try:
+ err = py.io.dupfile(err, encoding=encoding)
+ except Exception:
+ pass
+ self.trace.root.setwriter(err.write)
+
+ def pytest_configure(self, config):
+ config.addinivalue_line("markers",
+ "tryfirst: mark a hook implementation function such that the "
+ "plugin machinery will try to call it first/as early as possible.")
+ config.addinivalue_line("markers",
+ "trylast: mark a hook implementation function such that the "
+ "plugin machinery will try to call it last/as late as possible.")
+
class Parser:
- """ Parser for command line arguments. """
+ """ Parser for command line arguments and ini-file values. """
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
@@ -35,15 +101,17 @@
if option.dest:
self._processopt(option)
- def addnote(self, note):
- self._notes.append(note)
-
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
- :name: unique name of the option group.
+ :name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
+
+ The returned group object has an ``addoption`` method with the same
+ signature as :py:func:`parser.addoption
+ <_pytest.config.Parser.addoption>` but will be shown in the
+ respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
@@ -57,33 +125,222 @@
return group
def addoption(self, *opts, **attrs):
- """ add an optparse-style option. """
+ """ register a command line option.
+
+ :opts: option names, can be short or long options.
+ :attrs: same attributes which the ``add_option()`` function of the
+ `argparse library
+ `_
+ accepts.
+
+ After command line parsing options are available on the pytest config
+ object via ``config.option.NAME`` where ``NAME`` is usually set
+ by passing a ``dest`` attribute, for example
+ ``addoption("--long", dest="NAME", ...)``.
+ """
self._anonymous.addoption(*opts, **attrs)
def parse(self, args):
- self.optparser = optparser = MyOptionParser(self)
+ from _pytest._argcomplete import try_argcomplete
+ self.optparser = self._getparser()
+ try_argcomplete(self.optparser)
+ return self.optparser.parse_args([str(x) for x in args])
+
+ def _getparser(self):
+ from _pytest._argcomplete import filescompleter
+ optparser = MyOptionParser(self)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
- optgroup = py.std.optparse.OptionGroup(optparser, desc)
- optgroup.add_options(group.options)
- optparser.add_option_group(optgroup)
- return self.optparser.parse_args([str(x) for x in args])
+ arggroup = optparser.add_argument_group(desc)
+ for option in group.options:
+ n = option.names()
+ a = option.attrs()
+ arggroup.add_argument(*n, **a)
+ # bash like autocompletion for dirs (appending '/')
+ optparser.add_argument(FILE_OR_DIR, nargs='*'
+ ).completer=filescompleter
+ return optparser
def parse_setoption(self, args, option):
- parsedoption, args = self.parse(args)
+ parsedoption = self.parse(args)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
- return args
+ return getattr(parsedoption, FILE_OR_DIR)
+
+ def parse_known_args(self, args):
+ optparser = self._getparser()
+ args = [str(x) for x in args]
+ return optparser.parse_known_args(args)[0]
def addini(self, name, help, type=None, default=None):
- """ add an ini-file option with the given name and description. """
+ """ register an ini-file option.
+
+ :name: name of the ini-variable
+ :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``.
+ :default: default value if no ini-file option exists but is queried.
+
+ The value of ini-variables can be retrieved via a call to
+ :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
+ """
assert type in (None, "pathlist", "args", "linelist")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
+class ArgumentError(Exception):
+ """
+ Raised if an Argument instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+
+class Argument:
+ """class that mimics the necessary behaviour of py.std.optparse.Option """
+ _typ_map = {
+ 'int': int,
+ 'string': str,
+ }
+ # enable after some grace period for plugin writers
+ TYPE_WARN = False
+
+ def __init__(self, *names, **attrs):
+ """store parms in private vars for use in add_argument"""
+ self._attrs = attrs
+ self._short_opts = []
+ self._long_opts = []
+ self.dest = attrs.get('dest')
+ if self.TYPE_WARN:
+ try:
+ help = attrs['help']
+ if '%default' in help:
+ py.std.warnings.warn(
+ 'pytest now uses argparse. "%default" should be'
+ ' changed to "%(default)s" ',
+ FutureWarning,
+ stacklevel=3)
+ except KeyError:
+ pass
+ try:
+ typ = attrs['type']
+ except KeyError:
+ pass
+ else:
+ # this might raise a keyerror as well, don't want to catch that
+ if isinstance(typ, py.builtin._basestring):
+ if typ == 'choice':
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this is optional and when supplied '
+ ' should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ # argparse expects a type here take it from
+ # the type of the first element
+ attrs['type'] = type(attrs['choices'][0])
+ else:
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ attrs['type'] = Argument._typ_map[typ]
+ # used in test_parseopt -> test_parse_defaultgetter
+ self.type = attrs['type']
+ else:
+ self.type = typ
+ try:
+ # attribute existence is tested in Config._processopt
+ self.default = attrs['default']
+ except KeyError:
+ pass
+ self._set_opt_strings(names)
+ if not self.dest:
+ if self._long_opts:
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ try:
+ self.dest = self._short_opts[0][1:]
+ except IndexError:
+ raise ArgumentError(
+ 'need a long or short option', self)
+
+ def names(self):
+ return self._short_opts + self._long_opts
+
+ def attrs(self):
+ # update any attributes set by processopt
+ attrs = 'default dest help'.split()
+ if self.dest:
+ attrs.append(self.dest)
+ for attr in attrs:
+ try:
+ self._attrs[attr] = getattr(self, attr)
+ except AttributeError:
+ pass
+ if self._attrs.get('help'):
+ a = self._attrs['help']
+ a = a.replace('%default', '%(default)s')
+ #a = a.replace('%prog', '%(prog)s')
+ self._attrs['help'] = a
+ return self._attrs
+
+ def _set_opt_strings(self, opts):
+ """directly from optparse
+
+ might not be necessary as this is passed to argparse later on"""
+ for opt in opts:
+ if len(opt) < 2:
+ raise ArgumentError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise ArgumentError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise ArgumentError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def __repr__(self):
+ retval = 'Argument('
+ if self._short_opts:
+ retval += '_short_opts: ' + repr(self._short_opts) + ', '
+ if self._long_opts:
+ retval += '_long_opts: ' + repr(self._long_opts) + ', '
+ retval += 'dest: ' + repr(self.dest) + ', '
+ if hasattr(self, 'type'):
+ retval += 'type: ' + repr(self.type) + ', '
+ if hasattr(self, 'default'):
+ retval += 'default: ' + repr(self.default) + ', '
+ if retval[-2:] == ', ': # always long enough to test ("Argument(" )
+ retval = retval[:-2]
+ retval += ')'
+ return retval
+
+
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
@@ -92,12 +349,18 @@
self.parser = parser
def addoption(self, *optnames, **attrs):
- """ add an option to this group. """
- option = py.std.optparse.Option(*optnames, **attrs)
+ """ add an option to this group.
+
+ if a shortened version of a long option is specified it will
+ be suppressed in the help. addoption('--twowords', '--two-words')
+ results in help showing '--two-words' only, but --twowords gets
+ accepted **and** the automatic destination is in args.twowords
+ """
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
- option = py.std.optparse.Option(*optnames, **attrs)
+ option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
From noreply at buildbot.pypy.org Mon Aug 18 22:25:47 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 22:25:47 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: Test fix
Message-ID: <20140818202547.1F86C1D2AE7@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7
Changeset: r72896:7d12f0c90a20
Date: 2014-08-18 22:12 +0200
http://bitbucket.org/pypy/pypy/changeset/7d12f0c90a20/
Log: Test fix
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -74,10 +74,11 @@
self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery
else:
self.gc_minimal_size_in_nursery = 0
- try:
- self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header
- except AttributeError:
- self.gc_size_of_header = WORD # for tests
+ gc_size_of_header = WORD # for tests
+ if hasattr(gc_ll_descr, 'gcheaderbuilder'):
+ if hasattr(gc_ll_descr.gcheaderbuilder, 'size_gc_header'):
+ gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header
+ self.gc_size_of_header = gc_size_of_header
self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn)
# building the barriers needs to happen before these:
self._build_failure_recovery(False, withfloats=False)
From noreply at buildbot.pypy.org Mon Aug 18 22:25:48 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Mon, 18 Aug 2014 22:25:48 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: Try to make non-stm jit backend tests
passing again, but not done yet
Message-ID: <20140818202548.5D4BF1D2AE7@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7
Changeset: r72897:dd825891d2a8
Date: 2014-08-18 22:24 +0200
http://bitbucket.org/pypy/pypy/changeset/dd825891d2a8/
Log: Try to make non-stm jit backend tests passing again, but not done
yet
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -2607,6 +2607,7 @@
# so if it is followed with a JB, it will follow the jump if
# we should break the transaction now.
#
+ assert self.cpu.gc_ll_descr.stm
if not IS_X86_64:
todo() # "needed for X86_64_SCRATCH_REG"
psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark
@@ -2631,6 +2632,7 @@
self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't"
def genop_discard_stm_read(self, op, arglocs):
+ assert self.cpu.gc_ll_descr.stm
if not IS_X86_64:
todo() # "needed for X86_64_SCRATCH_REG"
mc = self.mc
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -310,6 +310,8 @@
line = meth(self, op)
elif op.opname.startswith('stm_'):
if not self._is_stm():
+ if op.opname in ('stm_ignored_start', 'stm_ignored_stop'):
+ return
raise AssertionError("STM transformation not applied. "
"You need '--stm'")
from rpython.translator.stm import funcgen
From noreply at buildbot.pypy.org Tue Aug 19 08:50:41 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 19 Aug 2014 08:50:41 +0200 (CEST)
Subject: [pypy-commit] pypy default: Issue #1848: patch by paulie4
Message-ID: <20140819065041.B5D4C1C0250@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72898:8bc429151c06
Date: 2014-08-19 08:50 +0200
http://bitbucket.org/pypy/pypy/changeset/8bc429151c06/
Log: Issue #1848: patch by paulie4
diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py
--- a/lib_pypy/pyrepl/reader.py
+++ b/lib_pypy/pyrepl/reader.py
@@ -93,7 +93,7 @@
st = {}
for c in map(unichr, range(256)):
st[c] = SYNTAX_SYMBOL
- for c in [a for a in map(unichr, range(256)) if a.isalpha()]:
+ for c in [a for a in map(unichr, range(256)) if a.isalnum()]:
st[c] = SYNTAX_WORD
st[u'\n'] = st[u' '] = SYNTAX_WHITESPACE
return st
From noreply at buildbot.pypy.org Tue Aug 19 09:32:15 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 19 Aug 2014 09:32:15 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: fix for restoring atomic state in
start_if_not_atomic
Message-ID: <20140819073215.1F9E31C0250@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72899:2315ec757d57
Date: 2014-08-19 09:31 +0200
http://bitbucket.org/pypy/pypy/changeset/2315ec757d57/
Log: fix for restoring atomic state in start_if_not_atomic
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c
--- a/rpython/translator/stm/src_stm/stmgcintf.c
+++ b/rpython/translator/stm/src_stm/stmgcintf.c
@@ -159,7 +159,7 @@
pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit;
}
-static long _pypy_stm_start_transaction(void)
+long _pypy_stm_start_transaction(void)
{
pypy_stm_nursery_low_fill_mark = 1; /* will be set to a correct value below */
long counter = stm_start_transaction(&stm_thread_local);
diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h
--- a/rpython/translator/stm/src_stm/stmgcintf.h
+++ b/rpython/translator/stm/src_stm/stmgcintf.h
@@ -26,6 +26,7 @@
void _pypy_stm_initialize_nursery_low_fill_mark(long v_counter);
void _pypy_stm_inev_state(void);
+long _pypy_stm_start_transaction(void);
void _pypy_stm_become_inevitable(const char *);
void pypy_stm_become_globally_unique_transaction(void);
@@ -56,9 +57,7 @@
static inline void pypy_stm_start_if_not_atomic(void) {
if (pypy_stm_ready_atomic == 1) {
int e = errno;
- stm_start_transaction(&stm_thread_local);
- _pypy_stm_initialize_nursery_low_fill_mark(0);
- _pypy_stm_inev_state();
+ _pypy_stm_start_transaction();
errno = e;
}
}
From noreply at buildbot.pypy.org Tue Aug 19 10:42:54 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 10:42:54 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: Merge default into
gc-incminimark-pinning
Message-ID: <20140819084254.3E7901C0250@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72900:907a486e6ace
Date: 2014-08-18 22:19 +0200
http://bitbucket.org/pypy/pypy/changeset/907a486e6ace/
Log: Merge default into gc-incminimark-pinning
gc-incminimark-pinning based on default now, instead of
release-2.3.x
diff too long, truncating to 2000 out of 35830 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -10,3 +10,7 @@
20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0
0000000000000000000000000000000000000000 release-2.3.0
394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3
+32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1
+32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1
+32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1
+0000000000000000000000000000000000000000 release-2.2=3.1
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
#
-__version__ = '2.2.4.dev2'
+__version__ = '2.5.2'
diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_argcomplete.py
@@ -0,0 +1,104 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code).
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn"
+instead of the default "dirname ":
+
+ optparser.add_argument(Config._file_or_dir, nargs='*'
+ ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+ # PYTHON_ARGCOMPLETE_OK
+so that the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+ # PYTHON_ARGCOMPLETE_OK
+ near the top of the main python entry point
+- include in the file calling parse_args():
+ from _argcomplete import try_argcomplete, filescompleter
+ , call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+ completers):
+ export _ARC_DEBUG=1
+- run:
+ python-argcomplete-check-easy-install-script $(which appname)
+ echo $?
+ will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+ _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+ which should throw a KeyError: 'COMPLINE' (which is properly set by the
+ global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+ 'Fast file completer class'
+ def __init__(self, directories=True):
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ """only called on non option completions"""
+ if os.path.sep in prefix[1:]: #
+ prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+ else:
+ prefix_dir = 0
+ completion = []
+ globbed = []
+ if '*' not in prefix and '?' not in prefix:
+ if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash
+ globbed.extend(glob(prefix + '.*'))
+ prefix += '*'
+ globbed.extend(glob(prefix))
+ for x in sorted(globbed):
+ if os.path.isdir(x):
+ x += '/'
+ # append stripping the prefix (like bash, not like compgen)
+ completion.append(x[prefix_dir:])
+ return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+ # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format
+ if sys.version_info[:2] < (2, 6):
+ sys.exit(1)
+ try:
+ import argcomplete.completers
+ except ImportError:
+ sys.exit(-1)
+ filescompleter = FastFilesCompleter()
+
+ def try_argcomplete(parser):
+ argcomplete.autocomplete(parser)
+else:
+ def try_argcomplete(parser): pass
+ filescompleter = None
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -3,7 +3,6 @@
"""
import py
import sys
-import pytest
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
@@ -19,8 +18,8 @@
to provide assert expression information. """)
group.addoption('--no-assert', action="store_true", default=False,
dest="noassert", help="DEPRECATED equivalent to --assert=plain")
- group.addoption('--nomagic', action="store_true", default=False,
- dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
+ group.addoption('--nomagic', '--no-magic', action="store_true",
+ default=False, help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
@@ -35,22 +34,25 @@
mode = "plain"
if mode == "rewrite":
try:
- import ast
+ import ast # noqa
except ImportError:
mode = "reinterp"
else:
- if sys.platform.startswith('java'):
+ # Both Jython and CPython 2.6.0 have AST bugs that make the
+ # assertion rewriting hook malfunction.
+ if (sys.platform.startswith('java') or
+ sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
- reinterpret.AssertionError)
+ reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
- hook = rewrite.AssertionRewritingHook()
- sys.meta_path.append(hook)
+ hook = rewrite.AssertionRewritingHook() # noqa
+ sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
@@ -73,9 +75,16 @@
def callbinrepr(op, left, right):
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
+
for new_expl in hook_result:
if new_expl:
- res = '\n~'.join(new_expl)
+ # Don't include pageloads of data unless we are very
+ # verbose (-vv)
+ if (sum(len(p) for p in new_expl[1:]) > 80*8
+ and item.config.option.verbose < 2):
+ new_expl[1:] = [py.builtin._totext(
+ 'Detailed information truncated, use "-vv" to show')]
+ res = py.builtin._totext('\n~').join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
# The result will be fed back a python % formatting
# operation, which will fail if there are extraneous
@@ -95,9 +104,9 @@
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
- from _pytest.assertion import reinterpret
+ from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
- from _pytest.assertion import rewrite
+ from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -11,7 +11,7 @@
from _pytest.assertion.reinterpret import BuiltinAssertionError
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+if sys.platform.startswith("java"):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -526,10 +526,13 @@
# example:
def f():
return 5
+
def g():
return 3
+
def h(x):
return 'never'
+
check("f() * g() == 5")
check("not f()")
check("not (f() and g() or 0)")
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,18 +1,26 @@
import sys
import py
from _pytest.assertion.util import BuiltinAssertionError
+u = py.builtin._totext
+
class AssertionError(BuiltinAssertionError):
def __init__(self, *args):
BuiltinAssertionError.__init__(self, *args)
if args:
+ # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+ # on Python2.7 and above we always get len(args) == 1
+ # with args[0] being the (x,y) tuple.
+ if len(args) > 1:
+ toprint = args
+ else:
+ toprint = args[0]
try:
- self.msg = str(args[0])
- except py.builtin._sysex:
- raise
- except:
- self.msg = "<[broken __repr__] %s at %0xd>" %(
- args[0].__class__, id(args[0]))
+ self.msg = u(toprint)
+ except Exception:
+ self.msg = u(
+ "<[broken __repr__] %s at %0xd>"
+ % (toprint.__class__, id(toprint)))
else:
f = py.code.Frame(sys._getframe(1))
try:
@@ -44,4 +52,3 @@
from _pytest.assertion.newinterpret import interpret as reinterpret
else:
reinterpret = reinterpret_old
-
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -6,6 +6,7 @@
import imp
import marshal
import os
+import re
import struct
import sys
import types
@@ -14,13 +15,7 @@
from _pytest.assertion import util
-# Windows gives ENOENT in places *nix gives ENOTDIR.
-if sys.platform.startswith("win"):
- PATH_COMPONENT_NOT_DIR = errno.ENOENT
-else:
- PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
-
-# py.test caches rewritten pycs in __pycache__.
+# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
@@ -34,17 +29,19 @@
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
-PYC_EXT = ".py" + "c" if __debug__ else "o"
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertionRewritingHook(object):
- """Import hook which rewrites asserts."""
+ """PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.session = None
self.modules = {}
+ self._register_with_pkg_resources()
def set_session(self, session):
self.fnpats = session.config.getini("python_files")
@@ -59,8 +56,12 @@
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
- if path is not None and len(path) == 1:
- pth = path[0]
+ if path is not None:
+ # Starting with Python 3.3, path is a _NamespacePath(), which
+ # causes problems if not converted to list.
+ path = list(path)
+ if len(path) == 1:
+ pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
@@ -95,12 +96,13 @@
finally:
self.session = sess
else:
- state.trace("matched test file (was specified on cmdline): %r" % (fn,))
+ state.trace("matched test file (was specified on cmdline): %r" %
+ (fn,))
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
- # concurrent py.test processes rewriting and loading pycs. To avoid
+ # concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
@@ -116,19 +118,19 @@
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
- elif e == PATH_COMPONENT_NOT_DIR:
+ elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e == errno.EACCES:
- state.trace("read only directory: %r" % (fn_pypath.dirname,))
+ state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
- # Notice that even if we're in a read-only directory, I'm going to check
- # for a cached pyc. This may not be optimal...
+ # Notice that even if we're in a read-only directory, I'm going
+ # to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
state.trace("rewriting %r" % (fn,))
@@ -153,27 +155,59 @@
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
+ mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
del sys.modules[name]
raise
return sys.modules[name]
-def _write_pyc(co, source_path, pyc):
- # Technically, we don't have to have the same pyc format as (C)Python, since
- # these "pycs" should never be seen by builtin import. However, there's
- # little reason deviate, and I hope sometime to be able to use
- # imp.load_compiled to load them. (See the comment in load_module above.)
+
+
+ def is_package(self, name):
+ try:
+ fd, fn, desc = imp.find_module(name)
+ except ImportError:
+ return False
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ return tp == imp.PKG_DIRECTORY
+
+ @classmethod
+ def _register_with_pkg_resources(cls):
+ """
+ Ensure package resources can be loaded from this loader. May be called
+ multiple times, as the operation is idempotent.
+ """
+ try:
+ import pkg_resources
+ # access an attribute in case a deferred importer is present
+ pkg_resources.__name__
+ except ImportError:
+ return
+
+ # Since pytest tests are always located in the file system, the
+ # DefaultProvider is appropriate.
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+
+def _write_pyc(state, co, source_path, pyc):
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+ # import. However, there's little reason deviate, and I hope
+ # sometime to be able to use imp.load_compiled to load them. (See
+ # the comment in load_module above.)
mtime = int(source_path.mtime())
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
- if err == PATH_COMPONENT_NOT_DIR:
- # This happens when we get a EEXIST in find_module creating the
- # __pycache__ directory and __pycache__ is by some non-dir node.
- return False
- raise
+ state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, __pycache__ being a
+ # file etc.
+ return False
try:
fp.write(imp.get_magic())
fp.write(struct.pack(">",
- ast.Add : "+",
- ast.Sub : "-",
- ast.Mult : "*",
- ast.Div : "/",
- ast.FloorDiv : "//",
- ast.Mod : "%",
- ast.Eq : "==",
- ast.NotEq : "!=",
- ast.Lt : "<",
- ast.LtE : "<=",
- ast.Gt : ">",
- ast.GtE : ">=",
- ast.Pow : "**",
- ast.Is : "is",
- ast.IsNot : "is not",
- ast.In : "in",
- ast.NotIn : "not in"
+ ast.BitOr: "|",
+ ast.BitXor: "^",
+ ast.BitAnd: "&",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.FloorDiv: "//",
+ ast.Mod: "%%", # escaped for string formatting
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.Lt: "<",
+ ast.LtE: "<=",
+ ast.Gt: ">",
+ ast.GtE: ">=",
+ ast.Pow: "**",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in"
}
@@ -341,7 +408,7 @@
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
- isinstance(item.value, ast.Str)):
+ isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
@@ -462,7 +529,8 @@
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
- variables = [ast.Name(name, ast.Store()) for name in self.variables]
+ variables = [ast.Name(name, ast.Store())
+ for name in self.variables]
clear = ast.Assign(variables, ast.Name("None", ast.Load()))
self.statements.append(clear)
# Fix line numbers.
@@ -471,11 +539,12 @@
return self.statements
def visit_Name(self, name):
- # Check if the name is local or not.
+ # Display the repr of the name if it's a local variable or
+ # _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [], None, None)
- globs = ast.Call(self.builtin("globals"), [], [], None, None)
- ops = [ast.In(), ast.IsNot()]
- test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+ inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+ dorepr = self.helper("should_repr_global_name", name)
+ test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
@@ -492,7 +561,8 @@
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
- self.on_failure.append(ast.If(cond, fail_inner, []))
+ # cond is set in a prior loop iteration below
+ self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
@@ -548,7 +618,8 @@
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+ new_call = ast.Call(new_func, new_args, new_kwargs,
+ new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
@@ -584,7 +655,7 @@
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
- # Use py.code._reprcompare if that's available.
+ # Use _pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -1,8 +1,13 @@
"""Utilities for assertion debugging"""
import py
+try:
+ from collections import Sequence
+except ImportError:
+ Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
@@ -10,6 +15,7 @@
# DebugInterpreter.
_reprcompare = None
+
def format_explanation(explanation):
"""This formats an explanation
@@ -20,7 +26,18 @@
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
- # simplify 'assert False where False = ...'
+ explanation = _collapse_false(explanation)
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+ """Collapse expansions of False
+
+ So this strips out any "assert False\n{where False = ...\n}"
+ blocks.
+ """
where = 0
while True:
start = where = explanation.find("False\n{False = ", where)
@@ -42,28 +59,48 @@
explanation = (explanation[:start] + explanation[start+15:end-1] +
explanation[end+1:])
where -= 17
- raw_lines = (explanation or '').split('\n')
- # escape newlines not followed by {, } and ~
+ return explanation
+
+
+def _split_explanation(explanation):
+ """Return a list of individual lines in the explanation
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
+ return lines
+
+def _format_lines(lines):
+ """Format the individual lines
+
+ This will replace the '{', '}' and '~' characters of our mini
+ formatting language with the proper 'where ...', 'and ...' and ' +
+ ...' text, taking care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
- s = 'and '
+ s = u('and ')
else:
- s = 'where '
+ s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
- result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
@@ -71,9 +108,9 @@
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
- result.append(' '*len(stack) + line[1:])
+ result.append(u(' ')*len(stack) + line[1:])
assert len(stack) == 1
- return '\n'.join(result)
+ return result
# Provide basestring in python3
@@ -83,132 +120,163 @@
basestring = str
-def assertrepr_compare(op, left, right):
- """return specialised explanations for some operators/operands"""
- width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+def assertrepr_compare(config, op, left, right):
+ """Return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width/2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
- summary = '%s %s %s' % (left_repr, op, right_repr)
+ summary = u('%s %s %s') % (left_repr, op, right_repr)
- issequence = lambda x: isinstance(x, (list, tuple))
+ issequence = lambda x: (isinstance(x, (list, tuple, Sequence))
+ and not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
- isset = lambda x: isinstance(x, set)
+ isset = lambda x: isinstance(x, (set, frozenset))
+ verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
- explanation = _diff_text(left, right)
+ explanation = _diff_text(left, right, verbose)
elif issequence(left) and issequence(right):
- explanation = _compare_eq_sequence(left, right)
+ explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
- explanation = _compare_eq_set(left, right)
+ explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
- explanation = _diff_text(py.std.pprint.pformat(left),
- py.std.pprint.pformat(right))
+ explanation = _compare_eq_dict(left, right, verbose)
elif op == 'not in':
if istext(left) and istext(right):
- explanation = _notin_text(left, right)
- except py.builtin._sysex:
- raise
- except:
+ explanation = _notin_text(left, right, verbose)
+ except Exception:
excinfo = py.code.ExceptionInfo()
- explanation = ['(pytest_assertion plugin: representation of '
- 'details failed. Probably an object has a faulty __repr__.)',
- str(excinfo)
- ]
-
+ explanation = [
+ u('(pytest_assertion plugin: representation of details failed. '
+ 'Probably an object has a faulty __repr__.)'),
+ u(excinfo)]
if not explanation:
return None
- # Don't include pageloads of data, should be configurable
- if len(''.join(explanation)) > 80*8:
- explanation = ['Detailed information too verbose, truncated']
-
return [summary] + explanation
-def _diff_text(left, right):
- """Return the explanation for the diff between text
+def _diff_text(left, right, verbose=False):
+ """Return the explanation for the diff between text or bytes
- This will skip leading and trailing characters which are
- identical to keep the diff minimal.
+ Unless --verbose is used this will skip leading and trailing
+ characters which are identical to keep the diff minimal.
+
+ If the input are bytes they will be safely converted to text.
"""
explanation = []
- i = 0 # just in case left or right has zero length
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation = ['Skipping %s identical '
- 'leading characters in diff' % i]
- left = left[i:]
- right = right[i:]
- if len(left) == len(right):
- for i in range(len(left)):
- if left[-i] != right[-i]:
+ if isinstance(left, py.builtin.bytes):
+ left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+ if isinstance(right, py.builtin.bytes):
+ right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+ if not verbose:
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
break
if i > 42:
- i -= 10 # Provide some context
- explanation += ['Skipping %s identical '
- 'trailing characters in diff' % i]
- left = left[:-i]
- right = right[:-i]
+ i -= 10 # Provide some context
+ explanation = [u('Skipping %s identical leading '
+ 'characters in diff, use -v to show') % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += [u('Skipping %s identical trailing '
+ 'characters in diff, use -v to show') % i]
+ left = left[:-i]
+ right = right[:-i]
explanation += [line.strip('\n')
for line in py.std.difflib.ndiff(left.splitlines(),
right.splitlines())]
return explanation
-def _compare_eq_sequence(left, right):
+def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
- explanation += ['At index %s diff: %r != %r' %
- (i, left[i], right[i])]
+ explanation += [u('At index %s diff: %r != %r')
+ % (i, left[i], right[i])]
break
if len(left) > len(right):
- explanation += ['Left contains more items, '
- 'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+ explanation += [u('Left contains more items, first extra item: %s')
+ % py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
- explanation += ['Right contains more items, '
- 'first extra item: %s' % py.io.saferepr(right[len(left)],)]
- return explanation # + _diff_text(py.std.pprint.pformat(left),
- # py.std.pprint.pformat(right))
+ explanation += [
+ u('Right contains more items, first extra item: %s') %
+ py.io.saferepr(right[len(left)],)]
+ return explanation # + _diff_text(py.std.pprint.pformat(left),
+ # py.std.pprint.pformat(right))
-def _compare_eq_set(left, right):
+def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
- explanation.append('Extra items in the left set:')
+ explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
- explanation.append('Extra items in the right set:')
+ explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
-def _notin_text(term, text):
+def _compare_eq_dict(left, right, verbose=False):
+ explanation = []
+ common = set(left).intersection(set(right))
+ same = dict((k, left[k]) for k in common if left[k] == right[k])
+ if same and not verbose:
+ explanation += [u('Omitting %s identical items, use -v to show') %
+ len(same)]
+ elif same:
+ explanation += [u('Common items:')]
+ explanation += py.std.pprint.pformat(same).splitlines()
+ diff = set(k for k in common if left[k] != right[k])
+ if diff:
+ explanation += [u('Differing items:')]
+ for k in diff:
+ explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+ py.io.saferepr({k: right[k]})]
+ extra_left = set(left) - set(right)
+ if extra_left:
+ explanation.append(u('Left contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, left[k]) for k in extra_left)).splitlines())
+ extra_right = set(right) - set(left)
+ if extra_right:
+ explanation.append(u('Right contains more items:'))
+ explanation.extend(py.std.pprint.pformat(
+ dict((k, right[k]) for k in extra_right)).splitlines())
+ return explanation
+
+
+def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
- diff = _diff_text(correct_text, text)
- newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+ diff = _diff_text(correct_text, text, verbose)
+ newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
- if line.startswith('Skipping'):
+ if line.startswith(u('Skipping')):
continue
- if line.startswith('- '):
+ if line.startswith(u('- ')):
continue
- if line.startswith('+ '):
- newdiff.append(' ' + line[2:])
+ if line.startswith(u('+ ')):
+ newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -1,43 +1,114 @@
-""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """
+"""
+ per-test stdout/stderr capturing mechanisms,
+ ``capsys`` and ``capfd`` function arguments.
+"""
+# note: py.io capture code was copied from
+# pylib 1.4.20.dev2 (rev 13d9af95547e)
+import sys
+import os
+import tempfile
-import pytest, py
-import os
+import py
+import pytest
+
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ class BytesIO(StringIO):
+ def write(self, data):
+ if isinstance(data, unicode):
+ raise TypeError("not a byte value: %r" % (data,))
+ StringIO.write(self, data)
+
+if sys.version_info < (3, 0):
+ class TextIO(StringIO):
+ def write(self, data):
+ if not isinstance(data, unicode):
+ enc = getattr(self, '_encoding', 'UTF-8')
+ data = unicode(data, enc, 'replace')
+ StringIO.write(self, data)
+else:
+ TextIO = StringIO
+
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
def pytest_addoption(parser):
group = parser.getgroup("general")
- group._addoption('--capture', action="store", default=None,
- metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+ group._addoption(
+ '--capture', action="store", default=None,
+ metavar="method", choices=['fd', 'sys', 'no'],
help="per-test capturing method: one of fd (default)|sys|no.")
- group._addoption('-s', action="store_const", const="no", dest="capture",
+ group._addoption(
+ '-s', action="store_const", const="no", dest="capture",
help="shortcut for --capture=no.")
+
@pytest.mark.tryfirst
-def pytest_cmdline_parse(pluginmanager, args):
- # we want to perform capturing already for plugin/conftest loading
- if '-s' in args or "--capture=no" in args:
- method = "no"
- elif hasattr(os, 'dup') and '--capture=sys' not in args:
+def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
+ ns = parser.parse_known_args(args)
+ method = ns.capture
+ if not method:
method = "fd"
- else:
+ if method == "fd" and not hasattr(os, "dup"):
method = "sys"
capman = CaptureManager(method)
- pluginmanager.register(capman, "capturemanager")
+ early_config.pluginmanager.register(capman, "capturemanager")
+
+ # make sure that capturemanager is properly reset at final shutdown
+ def teardown():
+ try:
+ capman.reset_capturings()
+ except ValueError:
+ pass
+
+ early_config.pluginmanager.add_shutdown(teardown)
+
+ # make sure logging does not raise exceptions at the end
+ def silence_logging_at_shutdown():
+ if "logging" in sys.modules:
+ sys.modules["logging"].raiseExceptions = False
+ early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown)
+
+ # finally trigger conftest loading but while capturing (issue93)
+ capman.resumecapture()
+ try:
+ try:
+ return __multicall__.execute()
+ finally:
+ out, err = capman.suspendcapture()
+ except:
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+ raise
+
def addouterr(rep, outerr):
for secname, content in zip(["out", "err"], outerr):
if content:
rep.sections.append(("Captured std%s" % secname, content))
+
class NoCapture:
def startall(self):
pass
+
def resume(self):
pass
+
def reset(self):
pass
+
def suspend(self):
return "", ""
+
class CaptureManager:
def __init__(self, defaultmethod=None):
self._method2capture = {}
@@ -45,21 +116,23 @@
def _maketempfile(self):
f = py.std.tempfile.TemporaryFile()
- newf = py.io.dupfile(f, encoding="UTF-8")
+ newf = dupfile(f, encoding="UTF-8")
f.close()
return newf
def _makestringio(self):
- return py.io.TextIO()
+ return TextIO()
def _getcapture(self, method):
if method == "fd":
- return py.io.StdCaptureFD(now=False,
- out=self._maketempfile(), err=self._maketempfile()
+ return StdCaptureFD(
+ out=self._maketempfile(),
+ err=self._maketempfile(),
)
elif method == "sys":
- return py.io.StdCapture(now=False,
- out=self._makestringio(), err=self._makestringio()
+ return StdCapture(
+ out=self._makestringio(),
+ err=self._makestringio(),
)
elif method == "no":
return NoCapture()
@@ -74,23 +147,24 @@
method = config._conftest.rget("option_capture", path=fspath)
except KeyError:
method = "fd"
- if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
+ if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
method = "sys"
return method
def reset_capturings(self):
- for name, cap in self._method2capture.items():
+ for cap in self._method2capture.values():
cap.reset()
def resumecapture_item(self, item):
method = self._getmethod(item.config, item.fspath)
if not hasattr(item, 'outerr'):
- item.outerr = ('', '') # we accumulate outerr on the item
+ item.outerr = ('', '') # we accumulate outerr on the item
return self.resumecapture(method)
def resumecapture(self, method=None):
if hasattr(self, '_capturing'):
- raise ValueError("cannot resume, already capturing with %r" %
+ raise ValueError(
+ "cannot resume, already capturing with %r" %
(self._capturing,))
if method is None:
method = self._defaultmethod
@@ -119,30 +193,29 @@
return "", ""
def activate_funcargs(self, pyfuncitem):
- if not hasattr(pyfuncitem, 'funcargs'):
- return
- assert not hasattr(self, '_capturing_funcargs')
- self._capturing_funcargs = capturing_funcargs = []
- for name, capfuncarg in pyfuncitem.funcargs.items():
- if name in ('capsys', 'capfd'):
- capturing_funcargs.append(capfuncarg)
- capfuncarg._start()
+ funcargs = getattr(pyfuncitem, "funcargs", None)
+ if funcargs is not None:
+ for name, capfuncarg in funcargs.items():
+ if name in ('capsys', 'capfd'):
+ assert not hasattr(self, '_capturing_funcarg')
+ self._capturing_funcarg = capfuncarg
+ capfuncarg._start()
def deactivate_funcargs(self):
- capturing_funcargs = getattr(self, '_capturing_funcargs', None)
- if capturing_funcargs is not None:
- while capturing_funcargs:
- capfuncarg = capturing_funcargs.pop()
- capfuncarg._finalize()
- del self._capturing_funcargs
+ capturing_funcarg = getattr(self, '_capturing_funcarg', None)
+ if capturing_funcarg:
+ outerr = capturing_funcarg._finalize()
+ del self._capturing_funcarg
+ return outerr
def pytest_make_collect_report(self, __multicall__, collector):
method = self._getmethod(collector.config, collector.fspath)
try:
self.resumecapture(method)
except ValueError:
- return # recursive collect, XXX refactor capturing
- # to allow for more lightweight recursive capturing
+ # recursive collect, XXX refactor capturing
+ # to allow for more lightweight recursive capturing
+ return
try:
rep = __multicall__.execute()
finally:
@@ -169,46 +242,371 @@
@pytest.mark.tryfirst
def pytest_runtest_makereport(self, __multicall__, item, call):
- self.deactivate_funcargs()
+ funcarg_outerr = self.deactivate_funcargs()
rep = __multicall__.execute()
outerr = self.suspendcapture(item)
- if not rep.passed:
- addouterr(rep, outerr)
+ if funcarg_outerr is not None:
+ outerr = (outerr[0] + funcarg_outerr[0],
+ outerr[1] + funcarg_outerr[1])
+ addouterr(rep, outerr)
if not rep.passed or rep.when == "teardown":
outerr = ('', '')
item.outerr = outerr
return rep
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
def pytest_funcarg__capsys(request):
"""enables capturing of writes to sys.stdout/sys.stderr and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
- return CaptureFuncarg(py.io.StdCapture)
+ if "capfd" in request._funcargs:
+ raise request.raiseerror(error_capsysfderror)
+ return CaptureFixture(StdCapture)
+
def pytest_funcarg__capfd(request):
"""enables capturing of writes to file descriptors 1 and 2 and makes
captured output available via ``capfd.readouterr()`` method calls
which return a ``(out, err)`` tuple.
"""
+ if "capsys" in request._funcargs:
+ request.raiseerror(error_capsysfderror)
if not hasattr(os, 'dup'):
- py.test.skip("capfd funcarg needs os.dup")
- return CaptureFuncarg(py.io.StdCaptureFD)
+ pytest.skip("capfd funcarg needs os.dup")
+ return CaptureFixture(StdCaptureFD)
-class CaptureFuncarg:
+
+class CaptureFixture:
def __init__(self, captureclass):
- self.capture = captureclass(now=False)
+ self._capture = captureclass()
def _start(self):
- self.capture.startall()
+ self._capture.startall()
def _finalize(self):
- if hasattr(self, 'capture'):
- self.capture.reset()
- del self.capture
+ if hasattr(self, '_capture'):
+ outerr = self._outerr = self._capture.reset()
+ del self._capture
+ return outerr
def readouterr(self):
- return self.capture.readouterr()
+ try:
+ return self._capture.readouterr()
+ except AttributeError:
+ return self._outerr
def close(self):
self._finalize()
+
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None, patchsys=False):
+ """ save targetfd descriptor, and open a new
+ temporary file there. If no tmpfile is
+ specified a tempfile.TemporaryFile() will be opened
+ in text mode.
+ """
+ self.targetfd = targetfd
+ if tmpfile is None and targetfd != 0:
+ f = tempfile.TemporaryFile('wb+')
+ tmpfile = dupfile(f, encoding="UTF-8")
+ f.close()
+ self.tmpfile = tmpfile
+ self._savefd = os.dup(self.targetfd)
+ if patchsys:
+ self._oldsys = getattr(sys, patchsysdict[targetfd])
+
+ def start(self):
+ try:
+ os.fstat(self._savefd)
+ except OSError:
+ raise ValueError(
+ "saved filedescriptor not valid, "
+ "did you call start() twice?")
+ if self.targetfd == 0 and not self.tmpfile:
+ fd = os.open(os.devnull, os.O_RDONLY)
+ os.dup2(fd, 0)
+ os.close(fd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+ else:
+ os.dup2(self.tmpfile.fileno(), self.targetfd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+ def done(self):
+ """ unpatch and clean up, returns the self.tmpfile (file object)
+ """
+ os.dup2(self._savefd, self.targetfd)
+ os.close(self._savefd)
+ if self.targetfd != 0:
+ self.tmpfile.seek(0)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+ return self.tmpfile
+
+ def writeorg(self, data):
+ """ write a string to the original file descriptor
+ """
+ tempfp = tempfile.TemporaryFile()
+ try:
+ os.dup2(self._savefd, tempfp.fileno())
+ tempfp.write(data)
+ finally:
+ tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+ """ return a new open file object that's a duplicate of f
+
+ mode is duplicated if not given, 'buffering' controls
+ buffer size (defaulting to no buffering) and 'raising'
+ defines whether an exception is raised when an incompatible
+ file object is passed in (if raising is False, the file
+ object itself will be returned)
+ """
+ try:
+ fd = f.fileno()
+ mode = mode or f.mode
+ except AttributeError:
+ if raising:
+ raise
+ return f
+ newfd = os.dup(fd)
+ if sys.version_info >= (3, 0):
+ if encoding is not None:
+ mode = mode.replace("b", "")
+ buffering = True
+ return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+ else:
+ f = os.fdopen(newfd, mode, buffering)
+ if encoding is not None:
+ return EncodedFile(f, encoding)
+ return f
+
+
+class EncodedFile(object):
+ def __init__(self, _stream, encoding):
+ self._stream = _stream
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding)
+ self._stream.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+
+class Capture(object):
+ def reset(self):
+ """ reset sys.stdout/stderr and return captured output as strings. """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already reset")
+ self._reset = True
+ outfile, errfile = self.done(save=False)
+ out, err = "", ""
+ if outfile and not outfile.closed:
+ out = outfile.read()
+ outfile.close()
+ if errfile and errfile != outfile and not errfile.closed:
+ err = errfile.read()
+ errfile.close()
+ return out, err
+
+ def suspend(self):
+ """ return current snapshot captures, memorize tempfiles. """
+ outerr = self.readouterr()
+ outfile, errfile = self.done()
+ return outerr
+
+
+class StdCaptureFD(Capture):
+ """ This class allows capturing writes to FD1 and FD2
+ and may connect a NULL file to FD0 (and prevent
+ reads from sys.stdin). If any of the 0,1,2 file descriptors
+ is invalid it will not be captured.
+ """
+ def __init__(self, out=True, err=True, in_=True, patchsys=True):
+ self._options = {
+ "out": out,
+ "err": err,
+ "in_": in_,
+ "patchsys": patchsys,
+ }
+ self._save()
+
+ def _save(self):
+ in_ = self._options['in_']
+ out = self._options['out']
+ err = self._options['err']
+ patchsys = self._options['patchsys']
+ if in_:
+ try:
+ self.in_ = FDCapture(
+ 0, tmpfile=None,
+ patchsys=patchsys)
+ except OSError:
+ pass
+ if out:
+ tmpfile = None
+ if hasattr(out, 'write'):
+ tmpfile = out
+ try:
+ self.out = FDCapture(
+ 1, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['out'] = self.out.tmpfile
+ except OSError:
+ pass
+ if err:
+ if hasattr(err, 'write'):
+ tmpfile = err
+ else:
+ tmpfile = None
+ try:
+ self.err = FDCapture(
+ 2, tmpfile=tmpfile,
+ patchsys=patchsys)
+ self._options['err'] = self.err.tmpfile
+ except OSError:
+ pass
+
+ def startall(self):
+ if hasattr(self, 'in_'):
+ self.in_.start()
+ if hasattr(self, 'out'):
+ self.out.start()
+ if hasattr(self, 'err'):
+ self.err.start()
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if hasattr(self, 'out') and not self.out.tmpfile.closed:
+ outfile = self.out.done()
+ if hasattr(self, 'err') and not self.err.tmpfile.closed:
+ errfile = self.err.done()
+ if hasattr(self, 'in_'):
+ self.in_.done()
+ if save:
+ self._save()
+ return outfile, errfile
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = self._readsnapshot('out')
+ err = self._readsnapshot('err')
+ return out, err
+
+ def _readsnapshot(self, name):
+ if hasattr(self, name):
+ f = getattr(self, name).tmpfile
+ else:
+ return ''
+
+ f.seek(0)
+ res = f.read()
+ enc = getattr(f, "encoding", None)
+ if enc:
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+
+class StdCapture(Capture):
+ """ This class allows capturing writes to sys.stdout|stderr "in-memory"
+ and will raise errors on tries to read from sys.stdin. It only
+ modifies sys.stdout|stderr|stdin attributes and does not
+ touch underlying File Descriptors (use StdCaptureFD for that).
+ """
+ def __init__(self, out=True, err=True, in_=True):
+ self._oldout = sys.stdout
+ self._olderr = sys.stderr
+ self._oldin = sys.stdin
+ if out and not hasattr(out, 'file'):
+ out = TextIO()
+ self.out = out
+ if err:
+ if not hasattr(err, 'write'):
+ err = TextIO()
+ self.err = err
+ self.in_ = in_
+
+ def startall(self):
+ if self.out:
+ sys.stdout = self.out
+ if self.err:
+ sys.stderr = self.err
+ if self.in_:
+ sys.stdin = self.in_ = DontReadFromInput()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if self.out and not self.out.closed:
+ sys.stdout = self._oldout
+ outfile = self.out
+ outfile.seek(0)
+ if self.err and not self.err.closed:
+ sys.stderr = self._olderr
+ errfile = self.err
+ errfile.seek(0)
+ if self.in_:
+ sys.stdin = self._oldin
+ return outfile, errfile
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = err = ""
+ if self.out:
+ out = self.out.getvalue()
+ self.out.truncate(0)
+ self.out.seek(0)
+ if self.err:
+ err = self.err.getvalue()
+ self.err.truncate(0)
+ self.err.seek(0)
+ return out, err
+
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+
+ def isatty(self):
+ return False
+
+ def close(self):
+ pass
diff --git a/_pytest/config.py b/_pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -1,25 +1,91 @@
""" command line options, ini-file and conftest.py processing. """
import py
+# DO NOT import pytest here because it causes import cycle troubles
import sys, os
+from _pytest import hookspec # the extension point definitions
from _pytest.core import PluginManager
-import pytest
-def pytest_cmdline_parse(pluginmanager, args):
- config = Config(pluginmanager)
- config.parse(args)
- return config
+# pytest startup
-def pytest_unconfigure(config):
- while 1:
- try:
- fin = config._cleanup.pop()
- except IndexError:
- break
- fin()
+def main(args=None, plugins=None):
+ """ return exit code, after performing an in-process test run.
+
+ :arg args: list of command line arguments.
+
+ :arg plugins: list of plugin objects to be auto-registered during
+ initialization.
+ """
+ config = _prepareconfig(args, plugins)
+ return config.hook.pytest_cmdline_main(config=config)
+
+class cmdline: # compatibility namespace
+ main = staticmethod(main)
+
+class UsageError(Exception):
+ """ error in pytest usage or invocation"""
+
+_preinit = []
+
+default_plugins = (
+ "mark main terminal runner python pdb unittest capture skipping "
+ "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
+ "junitxml resultlog doctest").split()
+
+def _preloadplugins():
+ assert not _preinit
+ _preinit.append(get_plugin_manager())
+
+def get_plugin_manager():
+ if _preinit:
+ return _preinit.pop(0)
+ # subsequent calls to main will create a fresh instance
+ pluginmanager = PytestPluginManager()
+ pluginmanager.config = Config(pluginmanager) # XXX attr needed?
+ for spec in default_plugins:
+ pluginmanager.import_plugin(spec)
+ return pluginmanager
+
+def _prepareconfig(args=None, plugins=None):
+ if args is None:
+ args = sys.argv[1:]
+ elif isinstance(args, py.path.local):
+ args = [str(args)]
+ elif not isinstance(args, (tuple, list)):
+ if not isinstance(args, str):
+ raise ValueError("not a string or argument list: %r" % (args,))
+ args = py.std.shlex.split(args)
+ pluginmanager = get_plugin_manager()
+ if plugins:
+ for plugin in plugins:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+
+class PytestPluginManager(PluginManager):
+ def __init__(self, hookspecs=[hookspec]):
+ super(PytestPluginManager, self).__init__(hookspecs=hookspecs)
+ self.register(self)
+ if os.environ.get('PYTEST_DEBUG'):
+ err = sys.stderr
+ encoding = getattr(err, 'encoding', 'utf8')
+ try:
+ err = py.io.dupfile(err, encoding=encoding)
+ except Exception:
+ pass
+ self.trace.root.setwriter(err.write)
+
+ def pytest_configure(self, config):
+ config.addinivalue_line("markers",
+ "tryfirst: mark a hook implementation function such that the "
+ "plugin machinery will try to call it first/as early as possible.")
+ config.addinivalue_line("markers",
+ "trylast: mark a hook implementation function such that the "
+ "plugin machinery will try to call it last/as late as possible.")
+
class Parser:
- """ Parser for command line arguments. """
+ """ Parser for command line arguments and ini-file values. """
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
@@ -35,15 +101,17 @@
if option.dest:
self._processopt(option)
- def addnote(self, note):
- self._notes.append(note)
-
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
- :name: unique name of the option group.
+ :name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
+
+ The returned group object has an ``addoption`` method with the same
+ signature as :py:func:`parser.addoption
+ <_pytest.config.Parser.addoption>` but will be shown in the
+ respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
@@ -57,33 +125,222 @@
return group
def addoption(self, *opts, **attrs):
- """ add an optparse-style option. """
+ """ register a command line option.
+
+ :opts: option names, can be short or long options.
+ :attrs: same attributes which the ``add_option()`` function of the
+ `argparse library
+ `_
+ accepts.
+
+ After command line parsing options are available on the pytest config
+ object via ``config.option.NAME`` where ``NAME`` is usually set
+ by passing a ``dest`` attribute, for example
+ ``addoption("--long", dest="NAME", ...)``.
+ """
self._anonymous.addoption(*opts, **attrs)
def parse(self, args):
- self.optparser = optparser = MyOptionParser(self)
+ from _pytest._argcomplete import try_argcomplete
+ self.optparser = self._getparser()
+ try_argcomplete(self.optparser)
+ return self.optparser.parse_args([str(x) for x in args])
+
+ def _getparser(self):
+ from _pytest._argcomplete import filescompleter
+ optparser = MyOptionParser(self)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
- optgroup = py.std.optparse.OptionGroup(optparser, desc)
- optgroup.add_options(group.options)
- optparser.add_option_group(optgroup)
- return self.optparser.parse_args([str(x) for x in args])
+ arggroup = optparser.add_argument_group(desc)
+ for option in group.options:
+ n = option.names()
+ a = option.attrs()
+ arggroup.add_argument(*n, **a)
+ # bash like autocompletion for dirs (appending '/')
+ optparser.add_argument(FILE_OR_DIR, nargs='*'
+ ).completer=filescompleter
+ return optparser
def parse_setoption(self, args, option):
- parsedoption, args = self.parse(args)
+ parsedoption = self.parse(args)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
- return args
+ return getattr(parsedoption, FILE_OR_DIR)
+
+ def parse_known_args(self, args):
+ optparser = self._getparser()
+ args = [str(x) for x in args]
+ return optparser.parse_known_args(args)[0]
def addini(self, name, help, type=None, default=None):
- """ add an ini-file option with the given name and description. """
+ """ register an ini-file option.
+
+ :name: name of the ini-variable
+ :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``.
+ :default: default value if no ini-file option exists but is queried.
+
+ The value of ini-variables can be retrieved via a call to
+ :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
+ """
assert type in (None, "pathlist", "args", "linelist")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
+class ArgumentError(Exception):
+ """
+ Raised if an Argument instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+
+class Argument:
+ """class that mimics the necessary behaviour of py.std.optparse.Option """
+ _typ_map = {
+ 'int': int,
+ 'string': str,
+ }
+ # enable after some grace period for plugin writers
+ TYPE_WARN = False
+
+ def __init__(self, *names, **attrs):
+ """store params in private vars for use in add_argument"""
+ self._attrs = attrs
+ self._short_opts = []
+ self._long_opts = []
+ self.dest = attrs.get('dest')
+ if self.TYPE_WARN:
+ try:
+ help = attrs['help']
+ if '%default' in help:
+ py.std.warnings.warn(
+ 'pytest now uses argparse. "%default" should be'
+ ' changed to "%(default)s" ',
+ FutureWarning,
+ stacklevel=3)
+ except KeyError:
+ pass
+ try:
+ typ = attrs['type']
+ except KeyError:
+ pass
+ else:
+ # this might raise a keyerror as well, don't want to catch that
+ if isinstance(typ, py.builtin._basestring):
+ if typ == 'choice':
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this is optional and when supplied '
+ ' should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ # argparse expects a type here take it from
+ # the type of the first element
+ attrs['type'] = type(attrs['choices'][0])
+ else:
+ if self.TYPE_WARN:
+ py.std.warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ attrs['type'] = Argument._typ_map[typ]
+ # used in test_parseopt -> test_parse_defaultgetter
+ self.type = attrs['type']
+ else:
+ self.type = typ
+ try:
+ # attribute existence is tested in Config._processopt
+ self.default = attrs['default']
+ except KeyError:
+ pass
+ self._set_opt_strings(names)
+ if not self.dest:
+ if self._long_opts:
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ try:
+ self.dest = self._short_opts[0][1:]
+ except IndexError:
+ raise ArgumentError(
+ 'need a long or short option', self)
+
+ def names(self):
+ return self._short_opts + self._long_opts
+
+ def attrs(self):
+ # update any attributes set by processopt
+ attrs = 'default dest help'.split()
+ if self.dest:
+ attrs.append(self.dest)
+ for attr in attrs:
+ try:
+ self._attrs[attr] = getattr(self, attr)
+ except AttributeError:
+ pass
+ if self._attrs.get('help'):
+ a = self._attrs['help']
+ a = a.replace('%default', '%(default)s')
+ #a = a.replace('%prog', '%(prog)s')
+ self._attrs['help'] = a
+ return self._attrs
+
+ def _set_opt_strings(self, opts):
+ """directly from optparse
+
+ might not be necessary as this is passed to argparse later on"""
+ for opt in opts:
+ if len(opt) < 2:
+ raise ArgumentError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise ArgumentError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise ArgumentError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def __repr__(self):
+ retval = 'Argument('
+ if self._short_opts:
+ retval += '_short_opts: ' + repr(self._short_opts) + ', '
+ if self._long_opts:
+ retval += '_long_opts: ' + repr(self._long_opts) + ', '
+ retval += 'dest: ' + repr(self.dest) + ', '
+ if hasattr(self, 'type'):
+ retval += 'type: ' + repr(self.type) + ', '
+ if hasattr(self, 'default'):
+ retval += 'default: ' + repr(self.default) + ', '
+ if retval[-2:] == ', ': # always long enough to test ("Argument(" )
+ retval = retval[:-2]
+ retval += ')'
+ return retval
+
+
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
@@ -92,12 +349,18 @@
self.parser = parser
def addoption(self, *optnames, **attrs):
- """ add an option to this group. """
- option = py.std.optparse.Option(*optnames, **attrs)
+ """ add an option to this group.
+
+ if a shortened version of a long option is specified it will
From noreply at buildbot.pypy.org Tue Aug 19 10:42:55 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 10:42:55 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: check for array size in
tests
Message-ID: <20140819084255.764771C0250@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72901:3c4167af9ea2
Date: 2014-08-19 10:15 +0200
http://bitbucket.org/pypy/pypy/changeset/3c4167af9ea2/
Log: check for array size in tests
diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py
@@ -130,6 +130,7 @@
p1 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr)
i0 = getfield_gc(p1, descr=pinned_obj_my_int_descr)
""")
+ assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1
def test_simple_getfield_twice(self):
self.check_rewrite("""
@@ -145,3 +146,4 @@
p2 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 1, descr=ptr_array_descr)
i2 = getfield_gc(p2, descr=pinned_obj_my_int_descr)
""")
+ assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2
From noreply at buildbot.pypy.org Tue Aug 19 10:42:56 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 10:42:56 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: add test with pinned and
not pinned objects
Message-ID: <20140819084256.963C41C0250@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72902:21551a1f7e19
Date: 2014-08-19 10:40 +0200
http://bitbucket.org/pypy/pypy/changeset/21551a1f7e19/
Log: add test with pinned and not pinned objects
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -10,6 +10,7 @@
from rpython.rtyper.lltypesystem import lltype
from rpython.rlib.jit import JitDriver, dont_look_inside
from rpython.rlib.jit import elidable, unroll_safe
+from rpython.rlib.jit import promote
from rpython.jit.backend.llsupport.gc import GcLLDescr_framework
from rpython.tool.udir import udir
from rpython.config.translationoption import DEFL_GC
@@ -780,7 +781,6 @@
self.run('compile_framework_call_assembler')
def define_pinned_simple(cls):
- from rpython.rlib.jit import promote
class H:
inst = None
helper = H()
@@ -808,7 +808,6 @@
self.run('pinned_simple')
def define_pinned_unpin(cls):
- from rpython.rlib.jit import promote
class H:
inst = None
pinned = False
@@ -852,3 +851,49 @@
def test_pinned_unpin(self):
self.run('pinned_unpin')
+ def define_multiple_pinned(cls):
+ class H:
+ inst1 = None
+ inst2 = None
+ inst3 = None
+ initialised = False
+ helper = H()
+
+ @dont_look_inside
+ def get_instances():
+ if not helper.initialised:
+ helper.inst1 = X()
+ helper.inst1.x = 101
+ check(rgc.pin(helper.inst1))
+ #
+ helper.inst2 = X()
+ helper.inst2.x = 102
+ #
+ helper.inst3 = X()
+ helper.inst3.x = 103
+ check(rgc.pin(helper.inst3))
+ #
+ helper.initialised = True
+ #
+ check(rgc._is_pinned(helper.inst1))
+ check(not rgc._is_pinned(helper.inst2))
+ check(rgc._is_pinned(helper.inst3))
+ return (helper.inst1, helper.inst2, helper.inst3)
+
+ def fn(n, x, *args):
+ inst1, inst2, inst3 = get_instances()
+ promote(inst1)
+ promote(inst2)
+ promote(inst3)
+ #
+ check(inst1.x == 101)
+ check(inst2.x == 102)
+ check(inst3.x == 103)
+ #
+ n -= 1
+ return (n, x) + args
+
+ return None, fn, None
+
+ def test_multiple_pinned(self):
+ self.run('multiple_pinned')
From noreply at buildbot.pypy.org Tue Aug 19 13:08:25 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 19 Aug 2014 13:08:25 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: remove most should_break_transaction
guards; also add rstm.is_inevitable()
Message-ID: <20140819110825.A1D111D36A6@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72903:1a671d185d5f
Date: 2014-08-19 13:08 +0200
http://bitbucket.org/pypy/pypy/changeset/1a671d185d5f/
Log: remove most should_break_transaction guards; also add
rstm.is_inevitable()
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -152,7 +152,7 @@
ec.bytecode_only_trace(self)
else:
ec.bytecode_trace(self)
- rstm.possible_transaction_break()
+ rstm.possible_transaction_break(0)
next_instr = r_uint(self.last_instr)
opcode = ord(co_code[next_instr])
next_instr += 1
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -85,7 +85,7 @@
ec.bytecode_trace(self, decr_by)
jumpto = r_uint(self.last_instr)
if self.space.threadlocals.threads_running: # quasi-immutable field
- rstm.possible_transaction_break()
+ rstm.possible_transaction_break(1)
#
pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto,
pycode=self.getcode(),
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -908,10 +908,10 @@
return False
- @arguments(returns="i")
- def bhimpl_stm_should_break_transaction():
+ @arguments("i", returns="i")
+ def bhimpl_stm_should_break_transaction(keep):
from rpython.rlib import rstm
- return rstm.should_break_transaction()
+ return rstm.should_break_transaction(0)
@arguments()
def bhimpl_stm_hint_commit_soon():
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -187,14 +187,34 @@
# ------------------------------
- @arguments()
- def opimpl_stm_should_break_transaction(self):
- # XXX make it return BoxInt(1) instead of BoxInt(0) if there
- # is an inevitable transaction, because it's likely that there
- # will always be an inevitable transaction here
- resbox = history.BoxInt(0)
- mi = self.metainterp
- mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox)
+ @arguments("int")
+ def opimpl_stm_should_break_transaction(self, keep):
+ # from rpython.rlib import rstm
+
+ record_break = False
+ resbox = history.ConstInt(0)
+
+ if bool(keep):
+ # always keep (i.e. end of loops)
+ resbox = history.BoxInt(0)
+ record_break = True
+
+ ## XXX: not working yet. we are always inevitable when tracing
+ # if we_are_translated() and rstm.is_inevitable():
+ # # return BoxInt(1) if there is an inevitable
+ # # transaction, because it's likely that there
+ # # will always be an inevitable transaction here
+ # resbox = history.BoxInt(1)
+ # record_break = True
+
+ if record_break:
+ mi = self.metainterp
+ mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox)
+ else:
+ # don't record the should_break_transaction and optimize
+ # the guard away
+ pass
+
return resbox
@arguments()
diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py
--- a/rpython/jit/metainterp/test/test_stm.py
+++ b/rpython/jit/metainterp/test/test_stm.py
@@ -11,7 +11,7 @@
class STMTests:
def test_simple(self):
def g():
- return rstm.should_break_transaction()
+ return rstm.should_break_transaction(1)
res = self.interp_operations(g, [], translationoptions={"stm":True})
assert res == False
self.check_operations_history(stm_should_break_transaction=1)
diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py
--- a/rpython/jit/tl/tlc.py
+++ b/rpython/jit/tl/tlc.py
@@ -15,12 +15,12 @@
def int_o(self): raise TypeError
def to_string(self): raise TypeError
-
+
def add(self, other): raise TypeError
def sub(self, other): raise TypeError
def mul(self, other): raise TypeError
def div(self, other): raise TypeError
-
+
def eq(self, other): raise TypeError
def lt(self, other): raise TypeError
@@ -92,7 +92,7 @@
self.methods = {}
for methname, pc in descr.methods:
self.methods[methname] = pc
-
+
class InstanceObj(Obj):
def __init__(self, cls):
@@ -226,7 +226,7 @@
self.pc = pc
self.stack = []
-
+
def make_interp(supports_call, jitted=True):
myjitdriver = JitDriver(greens = ['pc', 'code'],
reds = ['frame', 'pool'])
@@ -234,7 +234,7 @@
def interp(code='', pc=0, inputarg=0, pool=None):
if not isinstance(code,str):
raise TypeError("code '%s' should be a string" % str(code))
-
+
if pool is None:
pool = ConstantPool()
args = [IntObj(inputarg)]
@@ -255,7 +255,7 @@
if opcode == NOP:
pass
-
+
elif opcode == NIL:
stack.append(nil)
@@ -268,7 +268,7 @@
elif opcode == CDR:
stack.append(stack.pop().cdr())
-
+
elif opcode == PUSH:
stack.append(IntObj(char2int(code[pc])))
pc += 1
@@ -349,32 +349,32 @@
pc += char2int(code[pc])
pc += 1
if jitted and old_pc > pc:
- rstm.possible_transaction_break()
+ rstm.possible_transaction_break(1)
myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame,
pool=pool)
-
+
elif opcode == BR_COND:
cond = stack.pop()
if cond.t():
old_pc = pc
pc += char2int(code[pc]) + 1
if jitted and old_pc > pc:
- rstm.possible_transaction_break()
+ rstm.possible_transaction_break(1)
myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame,
pool=pool)
else:
pc += 1
-
+
elif opcode == BR_COND_STK:
offset = stack.pop().int_o()
if stack.pop().t():
old_pc = pc
pc += offset
if jitted and old_pc > pc:
- rstm.possible_transaction_break()
+ rstm.possible_transaction_break(1)
myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame,
pool=pool)
-
+
elif supports_call and opcode == CALL:
offset = char2int(code[pc])
@@ -451,7 +451,7 @@
return frame.stack[-1]
else:
return None
-
+
return interp, interp_eval
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -49,9 +49,13 @@
# special-cased below: the emitted operation must be placed
# directly in the caller's graph
-def possible_transaction_break():
+ at specialize.arg(0)
+def possible_transaction_break(keep):
+ """ keep: should be True for checks that are absolutely
+ needed. False means the JIT only keeps the check if it
+ thinks that it helps """
if stm_is_enabled():
- if llop.stm_should_break_transaction(lltype.Bool):
+ if llop.stm_should_break_transaction(lltype.Bool, keep):
break_transaction()
def hint_commit_soon():
@@ -70,9 +74,10 @@
def partial_commit_and_resume_other_threads():
pass # for now
-def should_break_transaction():
+ at specialize.arg(0)
+def should_break_transaction(keep):
return we_are_translated() and (
- llop.stm_should_break_transaction(lltype.Bool))
+ llop.stm_should_break_transaction(lltype.Bool, keep))
@dont_look_inside
def break_transaction():
@@ -95,6 +100,10 @@
return llop.stm_get_atomic(lltype.Signed)
@dont_look_inside
+def is_inevitable():
+ return llop.stm_is_inevitable(lltype.Signed)
+
+ at dont_look_inside
def abort_and_retry():
llop.stm_abort_and_retry(lltype.Void)
diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py
--- a/rpython/rtyper/llinterp.py
+++ b/rpython/rtyper/llinterp.py
@@ -956,6 +956,7 @@
op_stm_enter_callback_call = _stm_not_implemented
op_stm_leave_callback_call = _stm_not_implemented
op_stm_get_atomic = _stm_not_implemented
+ op_stm_is_inevitable = _stm_not_implemented
op_stm_change_atomic = _stm_not_implemented
op_stm_set_transaction_length = _stm_not_implemented
op_stm_hash = _stm_not_implemented
@@ -970,7 +971,7 @@
op_stm_stop_all_other_threads = _stm_not_implemented
op_stm_partial_commit_and_resume_other_threads = _stm_not_implemented
- def op_stm_should_break_transaction(self):
+ def op_stm_should_break_transaction(self, keep):
return False
def op_threadlocalref_set(self, key, value):
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -445,6 +445,8 @@
'stm_decrement_atomic': LLOp(),
'stm_get_atomic': LLOp(sideeffects=False),
+ 'stm_is_inevitable': LLOp(sideeffects=False),
+
'stm_ignored_start': LLOp(canrun=True),
'stm_ignored_stop': LLOp(canrun=True),
diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py
--- a/rpython/translator/stm/funcgen.py
+++ b/rpython/translator/stm/funcgen.py
@@ -190,6 +190,10 @@
result = funcgen.expr(op.result)
return '%s = pypy_stm_get_atomic();' % (result,)
+def stm_is_inevitable(funcgen, op):
+ result = funcgen.expr(op.result)
+ return '%s = stm_is_inevitable();' % (result,)
+
def stm_abort_and_retry(funcgen, op):
return 'stm_abort_transaction();'
From noreply at buildbot.pypy.org Tue Aug 19 13:16:24 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 19 Aug 2014 13:16:24 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: add is_atomic to atomic.py and
transaction.py
Message-ID: <20140819111624.BA9631C1482@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72904:0e51cd5f2023
Date: 2014-08-19 13:16 +0200
http://bitbucket.org/pypy/pypy/changeset/0e51cd5f2023/
Log: add is_atomic to atomic.py and transaction.py
diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py
--- a/lib_pypy/atomic.py
+++ b/lib_pypy/atomic.py
@@ -5,7 +5,8 @@
try:
from __pypy__ import thread as _thread
- from __pypy__.thread import atomic, getsegmentlimit, hint_commit_soon
+ from __pypy__.thread import (atomic, getsegmentlimit,
+ hint_commit_soon, is_atomic)
except ImportError:
# Not a STM-enabled PyPy. We can still provide a version of 'atomic'
# that is good enough for our purposes. With this limited version,
@@ -22,6 +23,10 @@
def hint_commit_soon():
pass
+ def is_atomic():
+ return atomic.locked()
+
+
else:
import re, sys, linecache
diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py
--- a/lib_pypy/transaction.py
+++ b/lib_pypy/transaction.py
@@ -15,13 +15,15 @@
import sys, thread, collections, cStringIO, linecache
try:
- from __pypy__.thread import atomic
+ from __pypy__.thread import atomic, is_atomic
except ImportError:
# Not a STM-enabled PyPy. We can use a regular lock for 'atomic',
# which is good enough for our purposes. With this limited version,
# an atomic block in thread X will not prevent running thread Y, if
# thread Y is not within an atomic block at all.
atomic = thread.allocate_lock()
+ def is_atomic():
+ return atomic.locked()
try:
from __pypy__.thread import signals_enabled
From noreply at buildbot.pypy.org Tue Aug 19 13:26:39 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 19 Aug 2014 13:26:39 +0200 (CEST)
Subject: [pypy-commit] benchmarks default: removing weird use of atomic in
quick_sort.py (actually faster now without it)
Message-ID: <20140819112639.989851C1482@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r274:0ff2a5c67f95
Date: 2014-08-19 13:26 +0200
http://bitbucket.org/pypy/benchmarks/changeset/0ff2a5c67f95/
Log: removing weird use of atomic in quick_sort.py (actually faster now
without it)
diff --git a/multithread/quick_sort/quick_sort.py b/multithread/quick_sort/quick_sort.py
--- a/multithread/quick_sort/quick_sort.py
+++ b/multithread/quick_sort/quick_sort.py
@@ -8,6 +8,7 @@
atomic, Future, set_thread_pool, ThreadPool,
hint_commit_soon, print_abort_info)
+
import itertools
from collections import deque
@@ -50,59 +51,52 @@
l = l0
r = l + n - 1
while l <= r:
- with atomic:
- xl = xs[l]
- if xl < pivot:
- l += 1
- continue
- xr = xs[r]
- if xr > pivot:
- r -= 1
- continue
- xs[l], xs[r] = xr, xl
+ xl = xs[l]
+ if xl < pivot:
l += 1
+ continue
+ xr = xs[r]
+ if xr > pivot:
r -= 1
+ continue
+ xs[l], xs[r] = xr, xl
+ l += 1
+ r -= 1
fs = []
- # only start futures on a single level:
- do_futures = level == 4
+ do_futures = level >= 4
largs = (xs, l0, r - l0 + 1, level+1)
rargs = (xs, l, l0 + n - l, level+1)
leftf, rightf = False, False
if do_futures:
- if largs[2] > 2000:
+ if largs[2] > 1500:
fs.append(Future(qsort_f, *largs))
leftf = True
- if rargs[2] > 2000:
+ if rargs[2] > 1500:
fs.append(Future(qsort_f, *rargs))
rightf = True
if not leftf:
- if level >= 4 and largs[2] < 500:
- with atomic:
- fs.extend(qsort_f(*largs))
- else:
- fs.extend(qsort_f(*largs))
+ fs.extend(qsort_f(*largs))
if not rightf:
- if level >= 4 and rargs[2] < 500:
- with atomic:
- fs.extend(qsort_f(*rargs))
- else:
- fs.extend(qsort_f(*rargs))
+ fs.extend(qsort_f(*rargs))
#print_abort_info(0.0000001)
return fs
def wait_for_futures(fs):
+ c = 0
while fs:
f = fs.pop()
fs.extend(f())
+ c += 1
+ print "Futures:", c
-def run(threads=2, n=20000):
+def run(threads=2, n=60000):
threads = int(threads)
n = int(n)
@@ -110,11 +104,10 @@
to_sort = range(n)
t = 0
- for i in range(20):
- with atomic:
- random.seed(i)
- random.shuffle(to_sort)
- s = deque(to_sort)
+ for i in range(5):
+ random.seed(i+32)
+ random.shuffle(to_sort)
+ s = deque(to_sort)
# qsort(s, 0, len(s))
t -= time.time()
From noreply at buildbot.pypy.org Tue Aug 19 13:26:41 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 19 Aug 2014 13:26:41 +0200 (CEST)
Subject: [pypy-commit] benchmarks default: miscellaneous changes
Message-ID: <20140819112641.0B7BB1C1482@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch:
Changeset: r275:c45103668651
Date: 2014-08-19 13:26 +0200
http://bitbucket.org/pypy/benchmarks/changeset/c45103668651/
Log: miscellaneous changes
diff --git a/multithread/bench.py b/multithread/bench.py
--- a/multithread/bench.py
+++ b/multithread/bench.py
@@ -86,6 +86,7 @@
finally:
if not args.q:
print "== times ==\n", "\n".join(map(str, times))
+ print "== times short ==\n", str(times[-min(5, len(times)):])
print "== reported results ==\n", "\n".join(
map(str, filter(None, results)))
diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py
--- a/multithread/common/abstract_threading.py
+++ b/multithread/common/abstract_threading.py
@@ -4,7 +4,7 @@
try:
from atomic import (atomic, getsegmentlimit, print_abort_info,
- hint_commit_soon)
+ hint_commit_soon, is_atomic)
except:
atomic = RLock()
def getsegmentlimit():
@@ -13,6 +13,8 @@
pass
def hint_commit_soon():
pass
+ def is_atomic():
+ return atomic._RLock__count > 0
class TLQueue_concurrent(object):
diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py
--- a/multithread/raytrace/raytrace.py
+++ b/multithread/raytrace/raytrace.py
@@ -7,15 +7,15 @@
print_abort_info, hint_commit_soon)
import time
-import platform
-if platform.python_implementation() == "Jython":
- # be fair to jython and don't use a lock where none is required:
- class fakeatomic:
- def __enter__(self):
- pass
- def __exit__(self,*args):
- pass
- atomic = fakeatomic()
+#import platform
+#if platform.python_implementation() == "Jython":
+# # be fair to jython and don't use a lock where none is required:
+# class fakeatomic:
+# def __enter__(self):
+# pass
+# def __exit__(self,*args):
+# pass
+# atomic = fakeatomic()
AMBIENT = 0.1
From noreply at buildbot.pypy.org Tue Aug 19 13:51:56 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Tue, 19 Aug 2014 13:51:56 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: found a new TODO which would allow
pypy-stm to outperform pypy on quick_sort.py
Message-ID: <20140819115156.2CD421C1486@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72905:29fff73e1c07
Date: 2014-08-19 13:51 +0200
http://bitbucket.org/pypy/pypy/changeset/29fff73e1c07/
Log: found a new TODO which would allow pypy-stm to outperform pypy on
quick_sort.py
diff --git a/TODO b/TODO
--- a/TODO
+++ b/TODO
@@ -28,6 +28,14 @@
------------------------------------------------------------
+stm_read() should also be optimized (moved out of loops). E.g.
+quick_sort.py suffers a 30% slowdown because it uses deque.locate()
+extensively. This method happens to have a *very* tight loop
+where the read barrier has a big negative effect and could
+actually be moved out.
+
+------------------------------------------------------------
+
__pypy__.thread.getsegmentlimit():
XXX This limit is so far a compile time option (STM_NB_SEGMENTS in
From noreply at buildbot.pypy.org Tue Aug 19 15:14:49 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 15:14:49 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: failing test: malloc
object of type A, pin it, do GC collect and try to malloc object of type B.
Message-ID: <20140819131449.A2B171C148A@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72906:20eefbef7004
Date: 2014-08-19 15:13 +0200
http://bitbucket.org/pypy/pypy/changeset/20eefbef7004/
Log: failing test: malloc object of type A, pin it, do GC collect and try
to malloc object of type B.
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py
--- a/rpython/memory/gc/test/test_object_pinning.py
+++ b/rpython/memory/gc/test/test_object_pinning.py
@@ -4,11 +4,15 @@
from test_direct import BaseDirectGCTest
S = lltype.GcForwardReference()
-S.become(lltype.GcStruct('pinning_test_struct',
+S.become(lltype.GcStruct('pinning_test_struct1',
('someInt', lltype.Signed),
('next', lltype.Ptr(S)),
('data', lltype.Ptr(S))))
+T = lltype.GcForwardReference()
+T.become(lltype.GcStruct('pinning_test_struct2',
+ ('someInt', lltype.Signed)))
+
class PinningGCTest(BaseDirectGCTest):
def test_pin_can_move(self):
@@ -545,6 +549,21 @@
self.pin_shadow_1(self.gc.collect)
+ def test_malloc_different_types(self):
+ # scenario: malloc two objects of different type and pin them. Do a
+ # minor and major collection in between. This test showed a bug that was
+ # present in a previous implementation of pinning.
+ obj1 = self.malloc(S)
+ self.stackroots.append(obj1)
+ assert self.gc.pin(llmemory.cast_ptr_to_adr(obj1))
+ #
+ self.gc.collect()
+ #
+ obj2 = self.malloc(T)
+ self.stackroots.append(obj2)
+ assert self.gc.pin(llmemory.cast_ptr_to_adr(obj2))
+
+
def pin_shadow_2(self, collect_func):
ptr = self.malloc(S)
adr = llmemory.cast_ptr_to_adr(ptr)
From noreply at buildbot.pypy.org Tue Aug 19 16:35:46 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 16:35:46 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: fix for tests
'test_malloc_different_types' and 'test_tagged_id'. fijal solved it.
Message-ID: <20140819143546.E46681C1482@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72907:887cbc99da9b
Date: 2014-08-19 16:00 +0200
http://bitbucket.org/pypy/pypy/changeset/887cbc99da9b/
Log: fix for tests 'test_malloc_different_types' and 'test_tagged_id'.
fijal solved it.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -617,7 +617,7 @@
# Get the memory from the nursery. If there is not enough space
# there, do a collect first.
result = self.nursery_free
- self.nursery_free = result + totalsize
+ self.nursery_free = result + rawtotalsize
if self.nursery_free > self.nursery_top:
result = self.collect_and_reserve(result, totalsize)
#
From noreply at buildbot.pypy.org Tue Aug 19 16:35:48 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 16:35:48 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: don't allow objects to
be pinned that can contain GC pointers
Message-ID: <20140819143548.3CBD41C1482@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72908:e87564c16230
Date: 2014-08-19 16:24 +0200
http://bitbucket.org/pypy/pypy/changeset/e87564c16230/
Log: don't allow objects to be pinned that can contain GC pointers
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -988,6 +988,12 @@
# to check if can_move(obj) already returns True in which
# case a call to pin() is unnecessary.
return False
+ if self.has_gcptr(self.get_type_id(obj)):
+ # objects containing GC pointers can't be pinned. If we would add
+ # it, we would have to track all pinned objects and trace them
+ # every minor collection to make sure the referenced objects are
+ # kept alive.
+ return False
if self._is_pinned(obj):
# Already pinned, we do not allow to pin it again.
# Reason: It would be possible that the first caller unpins
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py
--- a/rpython/memory/gc/test/test_object_pinning.py
+++ b/rpython/memory/gc/test/test_object_pinning.py
@@ -3,29 +3,29 @@
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC, WORD
from test_direct import BaseDirectGCTest
+T = lltype.GcForwardReference()
+T.become(lltype.GcStruct('pinning_test_struct2',
+ ('someInt', lltype.Signed)))
+
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('pinning_test_struct1',
('someInt', lltype.Signed),
- ('next', lltype.Ptr(S)),
- ('data', lltype.Ptr(S))))
-
-T = lltype.GcForwardReference()
-T.become(lltype.GcStruct('pinning_test_struct2',
- ('someInt', lltype.Signed)))
+ ('next', lltype.Ptr(T)),
+ ('data', lltype.Ptr(T))))
class PinningGCTest(BaseDirectGCTest):
def test_pin_can_move(self):
# even a pinned object is considered to be movable. Only the caller
# of pin() knows if it is currently movable or not.
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.can_move(adr)
assert self.gc.pin(adr)
assert self.gc.can_move(adr)
def test_pin_twice(self):
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.pin(adr)
assert not self.gc.pin(adr)
@@ -37,7 +37,7 @@
self.gc.unpin, llmemory.cast_ptr_to_adr(ptr))
def test__is_pinned(self):
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert not self.gc._is_pinned(adr)
assert self.gc.pin(adr)
@@ -46,7 +46,7 @@
assert not self.gc._is_pinned(adr)
def test_prebuilt_not_pinnable(self):
- ptr = lltype.malloc(S, immortal=True)
+ ptr = lltype.malloc(T, immortal=True)
self.consider_constant(ptr)
assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr))
self.gc.collect()
@@ -59,6 +59,13 @@
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
from rpython.memory.gc.incminimark import STATE_SCANNING
+ def test_try_pin_gcref_containing_type(self):
+ # scenario: incminimark's object pinning can't pin objects that may
+ # contain GC pointers
+ obj = self.malloc(S)
+ assert not self.gc.pin(llmemory.cast_ptr_to_adr(obj))
+
+
def test_pin_old(self):
# scenario: try pinning an old object. This should be not possible and
# we want to make sure everything stays as it is.
@@ -78,11 +85,11 @@
def pin_pin_pinned_object_count(self, collect_func):
# scenario: pin two objects that are referenced from stackroots. Check
# if the pinned objects count is correct, even after another collection
- pinned1_ptr = self.malloc(S)
+ pinned1_ptr = self.malloc(T)
pinned1_ptr.someInt = 100
self.stackroots.append(pinned1_ptr)
#
- pinned2_ptr = self.malloc(S)
+ pinned2_ptr = self.malloc(T)
pinned2_ptr.someInt = 200
self.stackroots.append(pinned2_ptr)
#
@@ -105,7 +112,7 @@
def pin_unpin_pinned_object_count(self, collect_func):
# scenario: pin an object and check the pinned object count. Unpin it
# and check the count again.
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.stackroots.append(pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
@@ -131,7 +138,7 @@
# scenario: a pinned object that is part of the stack roots. Check if
# it is not moved
#
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
@@ -162,7 +169,7 @@
# that we do stepwise major collection and check in each step for
# a correct state
#
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
@@ -199,7 +206,7 @@
# scenario: test if the pinned object is moved after being unpinned.
# the second part of the scenario is the tested one. The first part
# is already tested by other tests.
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
@@ -246,7 +253,7 @@
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
#
# create young pinned one and let the old one reference the young one
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
@@ -283,7 +290,7 @@
assert not self.gc.is_in_nursery(old_adr)
#
# create young pinned one and let the old one reference the young one
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
@@ -329,7 +336,7 @@
collect_func() # make it old
old_ptr = self.stackroots[0]
#
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
@@ -342,7 +349,7 @@
assert self.gc.is_in_nursery(pinned_adr)
assert self.gc._is_pinned(pinned_adr)
# remove the reference
- self.write(old_ptr, 'next', lltype.nullptr(S))
+ self.write(old_ptr, 'next', lltype.nullptr(T))
# from now on the pinned object is dead. Do a collection and make sure
# old object still there and the pinned one is gone.
collect_func()
@@ -377,7 +384,7 @@
collect_func()
old_ptr = self.stackroots[0]
#
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
@@ -424,7 +431,7 @@
self.stackroots.append(root_ptr)
assert self.stackroots[0] == root_ptr # validate assumption
#
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(root_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
@@ -467,7 +474,7 @@
prebuilt_adr = llmemory.cast_ptr_to_adr(prebuilt_ptr)
collect_func()
#
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(prebuilt_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
@@ -506,7 +513,7 @@
old2_ptr.someInt = 800
self.stackroots.append(old2_ptr)
- pinned_ptr = self.malloc(S)
+ pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
@@ -528,7 +535,7 @@
def pin_shadow_1(self, collect_func):
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
ptr.someInt = 100
@@ -553,7 +560,7 @@
# scenario: malloc two objects of different type and pin them. Do a
# minor and major collection in between. This test showed a bug that was
# present in a previous implementation of pinning.
- obj1 = self.malloc(S)
+ obj1 = self.malloc(T)
self.stackroots.append(obj1)
assert self.gc.pin(llmemory.cast_ptr_to_adr(obj1))
#
@@ -565,7 +572,7 @@
def pin_shadow_2(self, collect_func):
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
ptr.someInt = 100
@@ -587,19 +594,19 @@
def test_pin_nursery_top_scenario1(self):
- ptr1 = self.malloc(S)
+ ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
- ptr2 = self.malloc(S)
+ ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
- ptr3 = self.malloc(S)
+ ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
@@ -625,19 +632,19 @@
def test_pin_nursery_top_scenario2(self):
- ptr1 = self.malloc(S)
+ ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
- ptr2 = self.malloc(S)
+ ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
- ptr3 = self.malloc(S)
+ ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
@@ -665,19 +672,19 @@
def test_pin_nursery_top_scenario3(self):
- ptr1 = self.malloc(S)
+ ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
- ptr2 = self.malloc(S)
+ ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
- ptr3 = self.malloc(S)
+ ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
@@ -707,19 +714,19 @@
def test_pin_nursery_top_scenario4(self):
- ptr1 = self.malloc(S)
+ ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
- ptr2 = self.malloc(S)
+ ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
- ptr3 = self.malloc(S)
+ ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
@@ -750,19 +757,19 @@
def test_pin_nursery_top_scenario5(self):
- ptr1 = self.malloc(S)
+ ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
- ptr2 = self.malloc(S)
+ ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
- ptr3 = self.malloc(S)
+ ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
@@ -811,12 +818,12 @@
def fill_nursery_with_pinned_objects(self):
- typeid = self.get_type_id(S)
+ typeid = self.get_type_id(T)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
raw_size = llmemory.raw_malloc_usage(size)
object_mallocs = self.gc.nursery_size // raw_size
for instance_nr in xrange(object_mallocs):
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
@@ -824,9 +831,9 @@
def test_full_pinned_nursery_pin_fail(self):
self.fill_nursery_with_pinned_objects()
- # nursery should be full now, at least no space for another `S`.
+ # nursery should be full now, at least no space for another `T`.
# Next malloc should fail.
- py.test.raises(Exception, self.malloc, S)
+ py.test.raises(Exception, self.malloc, T)
def test_full_pinned_nursery_arena_reset(self):
# there were some bugs regarding the 'arena_reset()' calls at
@@ -836,21 +843,21 @@
def test_pinning_limit(self):
for instance_nr in xrange(self.gc.max_number_of_pinned_objects):
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
self.gc.pin(adr)
#
# now we reached the maximum amount of pinned objects
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
assert not self.gc.pin(adr)
test_pinning_limit.GC_PARAMS = {'max_number_of_pinned_objects': 5}
def test_full_pinned_nursery_pin_fail(self):
- typeid = self.get_type_id(S)
+ typeid = self.get_type_id(T)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
raw_size = llmemory.raw_malloc_usage(size)
object_mallocs = self.gc.nursery_size // raw_size
@@ -858,15 +865,15 @@
# but rather the case of a nursery full of pinned objects.
assert object_mallocs < self.gc.max_number_of_pinned_objects
for instance_nr in xrange(object_mallocs):
- ptr = self.malloc(S)
+ ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
self.gc.pin(adr)
#
- # nursery should be full now, at least no space for another `S`.
+ # nursery should be full now, at least no space for another `T`.
# Next malloc should fail.
- py.test.raises(Exception, self.malloc, S)
+ py.test.raises(Exception, self.malloc, T)
test_full_pinned_nursery_pin_fail.GC_PARAMS = \
{'max_number_of_pinned_objects': 50}
From noreply at buildbot.pypy.org Tue Aug 19 17:48:53 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 17:48:53 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: fix test to use class
without gc pointers.
Message-ID: <20140819154853.B8D721C1482@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72910:ea91bbc58941
Date: 2014-08-19 17:47 +0200
http://bitbucket.org/pypy/pypy/changeset/ea91bbc58941/
Log: fix test to use class without gc pointers.
As of changeset e87564c1623075847f63d55586c837db7f188f4c objects
with gc pointers can't be pinned.
diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py
@@ -23,6 +23,12 @@
next = None
+class Y(object):
+ # for pinning tests we need an object without references to other
+ # objects
+ def __init__(self, x=0):
+ self.x = x
+
class CheckError(Exception):
pass
@@ -788,7 +794,7 @@
@dont_look_inside
def get_y():
if not helper.inst:
- helper.inst = X()
+ helper.inst = Y()
helper.inst.x = 101
check(rgc.pin(helper.inst))
else:
@@ -818,7 +824,7 @@
@dont_look_inside
def get_y(n):
if not helper.inst:
- helper.inst = X()
+ helper.inst = Y()
helper.inst.x = 101
helper.pinned = True
check(rgc.pin(helper.inst))
@@ -862,14 +868,14 @@
@dont_look_inside
def get_instances():
if not helper.initialised:
- helper.inst1 = X()
+ helper.inst1 = Y()
helper.inst1.x = 101
check(rgc.pin(helper.inst1))
#
- helper.inst2 = X()
+ helper.inst2 = Y()
helper.inst2.x = 102
#
- helper.inst3 = X()
+ helper.inst3 = Y()
helper.inst3.x = 103
check(rgc.pin(helper.inst3))
#
From noreply at buildbot.pypy.org Tue Aug 19 19:05:23 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 19 Aug 2014 19:05:23 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Add stm_call_on_commit(),
for implementing free() of raw memory
Message-ID: <20140819170523.7553C1C14FF@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1319:1d2c771f29c4
Date: 2014-08-19 17:29 +0200
http://bitbucket.org/pypy/stmgc/changeset/1d2c771f29c4/
Log: Add stm_call_on_commit(), for implementing free() of raw memory from
non-inevitable transactions.
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -375,7 +375,8 @@
assert(list_is_empty(STM_PSEGMENT->young_weakrefs));
assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery));
assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows));
- assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort));
+ assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0]));
+ assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1]));
assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL);
assert(STM_PSEGMENT->large_overflow_objects == NULL);
#ifndef NDEBUG
@@ -850,7 +851,7 @@
STM_PSEGMENT->overflow_number_has_been_used = false;
}
- clear_callbacks_on_abort();
+ invoke_and_clear_user_callbacks(0); /* for commit */
/* send what is hopefully the correct signals */
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
@@ -1044,7 +1045,7 @@
memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort);
/* invoke the callbacks */
- invoke_and_clear_callbacks_on_abort();
+ invoke_and_clear_user_callbacks(1); /* for abort */
int attribute_to = STM_TIME_RUN_ABORTED_OTHER;
@@ -1101,7 +1102,7 @@
wait_for_end_of_inevitable_transaction(NULL);
STM_PSEGMENT->transaction_state = TS_INEVITABLE;
stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
- clear_callbacks_on_abort();
+ invoke_and_clear_user_callbacks(0); /* for commit */
}
else {
assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE);
diff --git a/c7/stm/core.h b/c7/stm/core.h
--- a/c7/stm/core.h
+++ b/c7/stm/core.h
@@ -132,8 +132,9 @@
weakrefs never point to young objects and never contain NULL. */
struct list_s *old_weakrefs;
- /* Tree of 'key->callback' associations from stm_call_on_abort() */
- struct tree_s *callbacks_on_abort;
+ /* Tree of 'key->callback' associations from stm_call_on_commit()
+ and stm_call_on_abort() (respectively, array items 0 and 1) */
+ struct tree_s *callbacks_on_commit_and_abort[2];
/* Start time: to know approximately for how long a transaction has
been running, in contention management */
diff --git a/c7/stm/extra.c b/c7/stm/extra.c
--- a/c7/stm/extra.c
+++ b/c7/stm/extra.c
@@ -3,55 +3,76 @@
#endif
-void stm_call_on_abort(stm_thread_local_t *tl,
- void *key, void callback(void *))
+static bool register_callbacks(stm_thread_local_t *tl,
+ void *key, void callback(void *), long index)
{
if (!_stm_in_transaction(tl)) {
/* check that the current thread-local is really running a
transaction, and do nothing otherwise. */
- return;
+ return false;
}
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
/* ignore callbacks if we're in an inevitable transaction
(which cannot abort) */
- return;
+ return false;
}
+ struct tree_s *callbacks;
+ callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index];
+
if (callback == NULL) {
/* ignore the return value: unregistered keys can be
"deleted" again */
- tree_delete_item(STM_PSEGMENT->callbacks_on_abort, (uintptr_t)key);
+ tree_delete_item(callbacks, (uintptr_t)key);
}
else {
/* double-registering the same key will crash */
- tree_insert(STM_PSEGMENT->callbacks_on_abort,
- (uintptr_t)key, (uintptr_t)callback);
+ tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback);
+ }
+ return true;
+}
+
+void stm_call_on_commit(stm_thread_local_t *tl,
+ void *key, void callback(void *))
+{
+ if (!register_callbacks(tl, key, callback, 0)) {
+ /* no regular transaction running, invoke the callback
+ immediately */
+ callback(key);
}
}
-static void clear_callbacks_on_abort(void)
+void stm_call_on_abort(stm_thread_local_t *tl,
+ void *key, void callback(void *))
{
- if (!tree_is_cleared(STM_PSEGMENT->callbacks_on_abort))
- tree_clear(STM_PSEGMENT->callbacks_on_abort);
+ register_callbacks(tl, key, callback, 1);
}
-static void invoke_and_clear_callbacks_on_abort(void)
+static void invoke_and_clear_user_callbacks(long index)
{
- wlog_t *item;
- struct tree_s *callbacks = STM_PSEGMENT->callbacks_on_abort;
+ struct tree_s *callbacks;
+
+ /* clear the callbacks that we don't want to invoke at all */
+ callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[1 - index];
+ if (!tree_is_cleared(callbacks))
+ tree_clear(callbacks);
+
+ /* invoke the callbacks from the other group */
+ callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index];
if (tree_is_cleared(callbacks))
return;
- STM_PSEGMENT->callbacks_on_abort = tree_create();
+ STM_PSEGMENT->callbacks_on_commit_and_abort[index] = tree_create();
+ wlog_t *item;
TREE_LOOP_FORWARD(*callbacks, item) {
void *key = (void *)item->addr;
void (*callback)(void *) = (void(*)(void *))item->val;
assert(key != NULL);
assert(callback != NULL);
- /* The callback may call stm_call_on_abort(key, NULL). It is
- ignored, because 'callbacks_on_abort' was cleared already. */
+ /* The callback may call stm_call_on_abort(key, NULL). It is ignored,
+ because 'callbacks_on_commit_and_abort' was cleared already. */
callback(key);
} TREE_LOOP_END;
diff --git a/c7/stm/extra.h b/c7/stm/extra.h
--- a/c7/stm/extra.h
+++ b/c7/stm/extra.h
@@ -1,3 +1,3 @@
-static void clear_callbacks_on_abort(void);
-static void invoke_and_clear_callbacks_on_abort(void);
+static void invoke_and_clear_user_callbacks(long index);
+/* 0 = for commit, 1 = for abort */
diff --git a/c7/stm/setup.c b/c7/stm/setup.c
--- a/c7/stm/setup.c
+++ b/c7/stm/setup.c
@@ -126,7 +126,8 @@
pr->old_weakrefs = list_create();
pr->young_outside_nursery = tree_create();
pr->nursery_objects_shadows = tree_create();
- pr->callbacks_on_abort = tree_create();
+ pr->callbacks_on_commit_and_abort[0] = tree_create();
+ pr->callbacks_on_commit_and_abort[1] = tree_create();
pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i;
highest_overflow_number = pr->overflow_number;
pr->pub.transaction_read_version = 0xff;
@@ -166,7 +167,8 @@
list_free(pr->old_weakrefs);
tree_free(pr->young_outside_nursery);
tree_free(pr->nursery_objects_shadows);
- tree_free(pr->callbacks_on_abort);
+ tree_free(pr->callbacks_on_commit_and_abort[0]);
+ tree_free(pr->callbacks_on_commit_and_abort[1]);
}
munmap(stm_object_pages, TOTAL_MEMORY);
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -416,6 +416,12 @@
Note: 'key' must be aligned to a multiple of 8 bytes. */
void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
+/* If the current transaction commits later, invoke 'callback(key)'. If
+ the current transaction aborts, then the callback is forgotten. Same
+ restrictions as stm_call_on_abort(). If the transaction is or becomes
+ inevitable, 'callback(key)' is called immediately. */
+void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
+
/* Similar to stm_become_inevitable(), but additionally suspend all
other threads. A very heavy-handed way to make sure that no other
diff --git a/c7/test/support.py b/c7/test/support.py
--- a/c7/test/support.py
+++ b/c7/test/support.py
@@ -111,6 +111,7 @@
int stm_can_move(object_t *);
void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
+void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
#define STM_TIME_OUTSIDE_TRANSACTION ...
#define STM_TIME_RUN_CURRENT ...
diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py
--- a/c7/test/test_extra.py
+++ b/c7/test/test_extra.py
@@ -81,6 +81,85 @@
self.abort_transaction()
assert seen == []
+ def test_call_on_commit(self):
+ p0 = ffi_new_aligned("aaa")
+ p1 = ffi_new_aligned("hello")
+ p2 = ffi_new_aligned("removed")
+ p3 = ffi_new_aligned("world")
+ #
+ @ffi.callback("void(void *)")
+ def clear_me(p):
+ p = ffi.cast("char *", p)
+ p[0] = chr(ord(p[0]) + 1)
+ #
+ self.start_transaction()
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me)
+ # the registered callbacks are not called on abort
+ self.abort_transaction()
+ assert ffi.string(p0) == "aaa"
+ #
+ self.start_transaction()
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p1, clear_me)
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p2, clear_me)
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p3, clear_me)
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL)
+ assert ffi.string(p0) == "aaa"
+ assert ffi.string(p1) == "hello"
+ assert ffi.string(p2) == "removed"
+ assert ffi.string(p3) == "world"
+ self.commit_transaction()
+ #
+ assert ffi.string(p0) == "aaa"
+ assert ffi.string(p1) == "iello"
+ assert ffi.string(p2) == "removed"
+ assert ffi.string(p3) == "xorld"
+
+ def test_call_on_commit_immediately_if_inevitable(self):
+ p0 = ffi_new_aligned("aaa")
+ self.start_transaction()
+ self.become_inevitable()
+ #
+ @ffi.callback("void(void *)")
+ def clear_me(p):
+ p = ffi.cast("char *", p)
+ p[0] = chr(ord(p[0]) + 1)
+ #
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me)
+ assert ffi.string(p0) == "baa"
+ self.commit_transaction()
+ assert ffi.string(p0) == "baa"
+
+ def test_call_on_commit_as_soon_as_inevitable(self):
+ p0 = ffi_new_aligned("aaa")
+ self.start_transaction()
+ #
+ @ffi.callback("void(void *)")
+ def clear_me(p):
+ p = ffi.cast("char *", p)
+ p[0] = chr(ord(p[0]) + 1)
+ #
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me)
+ assert ffi.string(p0) == "aaa"
+ self.become_inevitable()
+ assert ffi.string(p0) == "baa"
+ self.commit_transaction()
+ assert ffi.string(p0) == "baa"
+
+ def test_call_on_commit_immediately_if_outside_transaction(self):
+ p0 = ffi_new_aligned("aaa")
+ #
+ @ffi.callback("void(void *)")
+ def clear_me(p):
+ p = ffi.cast("char *", p)
+ p[0] = chr(ord(p[0]) + 1)
+ #
+ lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me)
+ assert ffi.string(p0) == "baa"
+ self.start_transaction()
+ assert ffi.string(p0) == "baa"
+ self.commit_transaction()
+ assert ffi.string(p0) == "baa"
+
def test_stm_become_globally_unique_transaction(self):
self.start_transaction()
#
From noreply at buildbot.pypy.org Tue Aug 19 19:05:24 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 19 Aug 2014 19:05:24 +0200 (CEST)
Subject: [pypy-commit] stmgc default: Add a return value to
stm_call_on_xxx() to know if a call with NULL
Message-ID: <20140819170524.AC4661C14FF@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r1320:bea13491352f
Date: 2014-08-19 18:52 +0200
http://bitbucket.org/pypy/stmgc/changeset/bea13491352f/
Log: Add a return value to stm_call_on_xxx() to know if a call with NULL
really cancelled something or not.
diff --git a/c7/stm/extra.c b/c7/stm/extra.c
--- a/c7/stm/extra.c
+++ b/c7/stm/extra.c
@@ -3,50 +3,51 @@
#endif
-static bool register_callbacks(stm_thread_local_t *tl,
+static long register_callbacks(stm_thread_local_t *tl,
void *key, void callback(void *), long index)
{
if (!_stm_in_transaction(tl)) {
/* check that the current thread-local is really running a
transaction, and do nothing otherwise. */
- return false;
+ return -1;
}
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
/* ignore callbacks if we're in an inevitable transaction
(which cannot abort) */
- return false;
+ return -1;
}
struct tree_s *callbacks;
callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index];
if (callback == NULL) {
- /* ignore the return value: unregistered keys can be
- "deleted" again */
- tree_delete_item(callbacks, (uintptr_t)key);
+ /* double-unregistering works, but return 0 */
+ return tree_delete_item(callbacks, (uintptr_t)key);
}
else {
/* double-registering the same key will crash */
tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback);
+ return 1;
}
- return true;
}
-void stm_call_on_commit(stm_thread_local_t *tl,
+long stm_call_on_commit(stm_thread_local_t *tl,
void *key, void callback(void *))
{
- if (!register_callbacks(tl, key, callback, 0)) {
+ long result = register_callbacks(tl, key, callback, 0);
+ if (result < 0 && callback != NULL) {
/* no regular transaction running, invoke the callback
immediately */
callback(key);
}
+ return result;
}
-void stm_call_on_abort(stm_thread_local_t *tl,
+long stm_call_on_abort(stm_thread_local_t *tl,
void *key, void callback(void *))
{
- register_callbacks(tl, key, callback, 1);
+ return register_callbacks(tl, key, callback, 1);
}
static void invoke_and_clear_user_callbacks(long index)
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -412,15 +412,16 @@
/* If the current transaction aborts later, invoke 'callback(key)'. If
the current transaction commits, then the callback is forgotten. You
can only register one callback per key. You can call
- 'stm_call_on_abort(key, NULL)' to cancel an existing callback.
+ 'stm_call_on_abort(key, NULL)' to cancel an existing callback
+ (returns 0 if there was no existing callback to cancel).
Note: 'key' must be aligned to a multiple of 8 bytes. */
-void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
+long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
/* If the current transaction commits later, invoke 'callback(key)'. If
the current transaction aborts, then the callback is forgotten. Same
restrictions as stm_call_on_abort(). If the transaction is or becomes
inevitable, 'callback(key)' is called immediately. */
-void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
+long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
/* Similar to stm_become_inevitable(), but additionally suspend all
diff --git a/c7/test/support.py b/c7/test/support.py
--- a/c7/test/support.py
+++ b/c7/test/support.py
@@ -109,9 +109,9 @@
long stm_id(object_t *obj);
void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash);
-int stm_can_move(object_t *);
-void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
-void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
+long stm_can_move(object_t *);
+long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
+long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
#define STM_TIME_OUTSIDE_TRANSACTION ...
#define STM_TIME_RUN_CURRENT ...
diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py
--- a/c7/test/test_extra.py
+++ b/c7/test/test_extra.py
@@ -32,6 +32,7 @@
p1 = ffi_new_aligned("hello")
p2 = ffi_new_aligned("removed")
p3 = ffi_new_aligned("world")
+ p4 = ffi_new_aligned("00")
#
@ffi.callback("void(void *)")
def clear_me(p):
@@ -39,17 +40,26 @@
p[0] = chr(ord(p[0]) + 1)
#
self.start_transaction()
- lib.stm_call_on_abort(self.get_stm_thread_local(), p0, clear_me)
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p0, clear_me)
+ assert x != 0
# the registered callbacks are removed on
# successful commit
self.commit_transaction()
assert ffi.string(p0) == "aaa"
#
self.start_transaction()
- lib.stm_call_on_abort(self.get_stm_thread_local(), p1, clear_me)
- lib.stm_call_on_abort(self.get_stm_thread_local(), p2, clear_me)
- lib.stm_call_on_abort(self.get_stm_thread_local(), p3, clear_me)
- lib.stm_call_on_abort(self.get_stm_thread_local(), p2, ffi.NULL)
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p1, clear_me)
+ assert x != 0
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p2, clear_me)
+ assert x != 0
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p3, clear_me)
+ assert x != 0
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p2, ffi.NULL)
+ assert x != 0
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p2, ffi.NULL)
+ assert x == 0
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p4, ffi.NULL)
+ assert x == 0
assert ffi.string(p0) == "aaa"
assert ffi.string(p1) == "hello"
assert ffi.string(p2) == "removed"
@@ -68,6 +78,7 @@
assert ffi.string(p1) == "iello"
assert ffi.string(p2) == "removed"
assert ffi.string(p3) == "xorld"
+ assert ffi.string(p4) == "00"
def test_ignores_if_outside_transaction(self):
@ffi.callback("void(void *)")
@@ -76,7 +87,8 @@
#
seen = []
p0 = ffi_new_aligned("aaa")
- lib.stm_call_on_abort(self.get_stm_thread_local(), p0, dont_see_me)
+ x = lib.stm_call_on_abort(self.get_stm_thread_local(), p0, dont_see_me)
+ assert x != 0
self.start_transaction()
self.abort_transaction()
assert seen == []
@@ -86,6 +98,7 @@
p1 = ffi_new_aligned("hello")
p2 = ffi_new_aligned("removed")
p3 = ffi_new_aligned("world")
+ p4 = ffi_new_aligned("00")
#
@ffi.callback("void(void *)")
def clear_me(p):
@@ -93,16 +106,25 @@
p[0] = chr(ord(p[0]) + 1)
#
self.start_transaction()
- lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me)
+ x = lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me)
+ assert x != 0
# the registered callbacks are not called on abort
self.abort_transaction()
assert ffi.string(p0) == "aaa"
#
self.start_transaction()
- lib.stm_call_on_commit(self.get_stm_thread_local(), p1, clear_me)
- lib.stm_call_on_commit(self.get_stm_thread_local(), p2, clear_me)
- lib.stm_call_on_commit(self.get_stm_thread_local(), p3, clear_me)
- lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL)
+ x = lib.stm_call_on_commit(self.get_stm_thread_local(), p1, clear_me)
+ assert x != 0
+ x = lib.stm_call_on_commit(self.get_stm_thread_local(), p2, clear_me)
+ assert x != 0
+ x = lib.stm_call_on_commit(self.get_stm_thread_local(), p3, clear_me)
+ assert x != 0
+ x = lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL)
+ assert x != 0
+ x = lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL)
+ assert x == 0
+ x = lib.stm_call_on_commit(self.get_stm_thread_local(), p4, ffi.NULL)
+ assert x == 0
assert ffi.string(p0) == "aaa"
assert ffi.string(p1) == "hello"
assert ffi.string(p2) == "removed"
@@ -113,6 +135,7 @@
assert ffi.string(p1) == "iello"
assert ffi.string(p2) == "removed"
assert ffi.string(p3) == "xorld"
+ assert ffi.string(p4) == "00"
def test_call_on_commit_immediately_if_inevitable(self):
p0 = ffi_new_aligned("aaa")
From noreply at buildbot.pypy.org Tue Aug 19 19:05:55 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 19 Aug 2014 19:05:55 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: import stmgc/1d2c771f29c4
Message-ID: <20140819170555.2EEEE1C14FF@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7
Changeset: r72911:adc9972b698b
Date: 2014-08-19 17:30 +0200
http://bitbucket.org/pypy/pypy/changeset/adc9972b698b/
Log: import stmgc/1d2c771f29c4
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-e85ce411f190
+1d2c771f29c4
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -376,7 +376,8 @@
assert(list_is_empty(STM_PSEGMENT->young_weakrefs));
assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery));
assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows));
- assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort));
+ assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0]));
+ assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1]));
assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL);
assert(STM_PSEGMENT->large_overflow_objects == NULL);
#ifndef NDEBUG
@@ -851,7 +852,7 @@
STM_PSEGMENT->overflow_number_has_been_used = false;
}
- clear_callbacks_on_abort();
+ invoke_and_clear_user_callbacks(0); /* for commit */
/* send what is hopefully the correct signals */
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
@@ -1045,7 +1046,7 @@
memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort);
/* invoke the callbacks */
- invoke_and_clear_callbacks_on_abort();
+ invoke_and_clear_user_callbacks(1); /* for abort */
int attribute_to = STM_TIME_RUN_ABORTED_OTHER;
@@ -1102,7 +1103,7 @@
wait_for_end_of_inevitable_transaction(NULL);
STM_PSEGMENT->transaction_state = TS_INEVITABLE;
stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
- clear_callbacks_on_abort();
+ invoke_and_clear_user_callbacks(0); /* for commit */
}
else {
assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE);
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -133,8 +133,9 @@
weakrefs never point to young objects and never contain NULL. */
struct list_s *old_weakrefs;
- /* Tree of 'key->callback' associations from stm_call_on_abort() */
- struct tree_s *callbacks_on_abort;
+ /* Tree of 'key->callback' associations from stm_call_on_commit()
+ and stm_call_on_abort() (respectively, array items 0 and 1) */
+ struct tree_s *callbacks_on_commit_and_abort[2];
/* Start time: to know approximately for how long a transaction has
been running, in contention management */
diff --git a/rpython/translator/stm/src_stm/stm/extra.c b/rpython/translator/stm/src_stm/stm/extra.c
--- a/rpython/translator/stm/src_stm/stm/extra.c
+++ b/rpython/translator/stm/src_stm/stm/extra.c
@@ -4,55 +4,76 @@
#endif
-void stm_call_on_abort(stm_thread_local_t *tl,
- void *key, void callback(void *))
+static bool register_callbacks(stm_thread_local_t *tl,
+ void *key, void callback(void *), long index)
{
if (!_stm_in_transaction(tl)) {
/* check that the current thread-local is really running a
transaction, and do nothing otherwise. */
- return;
+ return false;
}
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
/* ignore callbacks if we're in an inevitable transaction
(which cannot abort) */
- return;
+ return false;
}
+ struct tree_s *callbacks;
+ callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index];
+
if (callback == NULL) {
/* ignore the return value: unregistered keys can be
"deleted" again */
- tree_delete_item(STM_PSEGMENT->callbacks_on_abort, (uintptr_t)key);
+ tree_delete_item(callbacks, (uintptr_t)key);
}
else {
/* double-registering the same key will crash */
- tree_insert(STM_PSEGMENT->callbacks_on_abort,
- (uintptr_t)key, (uintptr_t)callback);
+ tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback);
+ }
+ return true;
+}
+
+void stm_call_on_commit(stm_thread_local_t *tl,
+ void *key, void callback(void *))
+{
+ if (!register_callbacks(tl, key, callback, 0)) {
+ /* no regular transaction running, invoke the callback
+ immediately */
+ callback(key);
}
}
-static void clear_callbacks_on_abort(void)
+void stm_call_on_abort(stm_thread_local_t *tl,
+ void *key, void callback(void *))
{
- if (!tree_is_cleared(STM_PSEGMENT->callbacks_on_abort))
- tree_clear(STM_PSEGMENT->callbacks_on_abort);
+ register_callbacks(tl, key, callback, 1);
}
-static void invoke_and_clear_callbacks_on_abort(void)
+static void invoke_and_clear_user_callbacks(long index)
{
- wlog_t *item;
- struct tree_s *callbacks = STM_PSEGMENT->callbacks_on_abort;
+ struct tree_s *callbacks;
+
+ /* clear the callbacks that we don't want to invoke at all */
+ callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[1 - index];
+ if (!tree_is_cleared(callbacks))
+ tree_clear(callbacks);
+
+ /* invoke the callbacks from the other group */
+ callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index];
if (tree_is_cleared(callbacks))
return;
- STM_PSEGMENT->callbacks_on_abort = tree_create();
+ STM_PSEGMENT->callbacks_on_commit_and_abort[index] = tree_create();
+ wlog_t *item;
TREE_LOOP_FORWARD(*callbacks, item) {
void *key = (void *)item->addr;
void (*callback)(void *) = (void(*)(void *))item->val;
assert(key != NULL);
assert(callback != NULL);
- /* The callback may call stm_call_on_abort(key, NULL). It is
- ignored, because 'callbacks_on_abort' was cleared already. */
+ /* The callback may call stm_call_on_abort(key, NULL). It is ignored,
+ because 'callbacks_on_commit_and_abort' was cleared already. */
callback(key);
} TREE_LOOP_END;
diff --git a/rpython/translator/stm/src_stm/stm/extra.h b/rpython/translator/stm/src_stm/stm/extra.h
--- a/rpython/translator/stm/src_stm/stm/extra.h
+++ b/rpython/translator/stm/src_stm/stm/extra.h
@@ -1,4 +1,4 @@
/* Imported by rpython/translator/stm/import_stmgc.py */
-static void clear_callbacks_on_abort(void);
-static void invoke_and_clear_callbacks_on_abort(void);
+static void invoke_and_clear_user_callbacks(long index);
+/* 0 = for commit, 1 = for abort */
diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c
--- a/rpython/translator/stm/src_stm/stm/setup.c
+++ b/rpython/translator/stm/src_stm/stm/setup.c
@@ -127,7 +127,8 @@
pr->old_weakrefs = list_create();
pr->young_outside_nursery = tree_create();
pr->nursery_objects_shadows = tree_create();
- pr->callbacks_on_abort = tree_create();
+ pr->callbacks_on_commit_and_abort[0] = tree_create();
+ pr->callbacks_on_commit_and_abort[1] = tree_create();
pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i;
highest_overflow_number = pr->overflow_number;
pr->pub.transaction_read_version = 0xff;
@@ -167,7 +168,8 @@
list_free(pr->old_weakrefs);
tree_free(pr->young_outside_nursery);
tree_free(pr->nursery_objects_shadows);
- tree_free(pr->callbacks_on_abort);
+ tree_free(pr->callbacks_on_commit_and_abort[0]);
+ tree_free(pr->callbacks_on_commit_and_abort[1]);
}
munmap(stm_object_pages, TOTAL_MEMORY);
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -417,6 +417,12 @@
Note: 'key' must be aligned to a multiple of 8 bytes. */
void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
+/* If the current transaction commits later, invoke 'callback(key)'. If
+ the current transaction aborts, then the callback is forgotten. Same
+ restrictions as stm_call_on_abort(). If the transaction is or becomes
+ inevitable, 'callback(key)' is called immediately. */
+void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
+
/* Similar to stm_become_inevitable(), but additionally suspend all
other threads. A very heavy-handed way to make sure that no other
From noreply at buildbot.pypy.org Tue Aug 19 19:05:56 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 19 Aug 2014 19:05:56 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: import stmgc/bea13491352f
Message-ID: <20140819170556.55D281C14FF@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7
Changeset: r72912:ffabe32cdcb9
Date: 2014-08-19 18:52 +0200
http://bitbucket.org/pypy/pypy/changeset/ffabe32cdcb9/
Log: import stmgc/bea13491352f
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-1d2c771f29c4
+bea13491352f
diff --git a/rpython/translator/stm/src_stm/stm/extra.c b/rpython/translator/stm/src_stm/stm/extra.c
--- a/rpython/translator/stm/src_stm/stm/extra.c
+++ b/rpython/translator/stm/src_stm/stm/extra.c
@@ -4,50 +4,51 @@
#endif
-static bool register_callbacks(stm_thread_local_t *tl,
+static long register_callbacks(stm_thread_local_t *tl,
void *key, void callback(void *), long index)
{
if (!_stm_in_transaction(tl)) {
/* check that the current thread-local is really running a
transaction, and do nothing otherwise. */
- return false;
+ return -1;
}
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
/* ignore callbacks if we're in an inevitable transaction
(which cannot abort) */
- return false;
+ return -1;
}
struct tree_s *callbacks;
callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index];
if (callback == NULL) {
- /* ignore the return value: unregistered keys can be
- "deleted" again */
- tree_delete_item(callbacks, (uintptr_t)key);
+ /* double-unregistering works, but return 0 */
+ return tree_delete_item(callbacks, (uintptr_t)key);
}
else {
/* double-registering the same key will crash */
tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback);
+ return 1;
}
- return true;
}
-void stm_call_on_commit(stm_thread_local_t *tl,
+long stm_call_on_commit(stm_thread_local_t *tl,
void *key, void callback(void *))
{
- if (!register_callbacks(tl, key, callback, 0)) {
+ long result = register_callbacks(tl, key, callback, 0);
+ if (result < 0 && callback != NULL) {
/* no regular transaction running, invoke the callback
immediately */
callback(key);
}
+ return result;
}
-void stm_call_on_abort(stm_thread_local_t *tl,
+long stm_call_on_abort(stm_thread_local_t *tl,
void *key, void callback(void *))
{
- register_callbacks(tl, key, callback, 1);
+ return register_callbacks(tl, key, callback, 1);
}
static void invoke_and_clear_user_callbacks(long index)
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -413,15 +413,16 @@
/* If the current transaction aborts later, invoke 'callback(key)'. If
the current transaction commits, then the callback is forgotten. You
can only register one callback per key. You can call
- 'stm_call_on_abort(key, NULL)' to cancel an existing callback.
+ 'stm_call_on_abort(key, NULL)' to cancel an existing callback
+ (returns 0 if there was no existing callback to cancel).
Note: 'key' must be aligned to a multiple of 8 bytes. */
-void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
+long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *));
/* If the current transaction commits later, invoke 'callback(key)'. If
the current transaction aborts, then the callback is forgotten. Same
restrictions as stm_call_on_abort(). If the transaction is or becomes
inevitable, 'callback(key)' is called immediately. */
-void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
+long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *));
/* Similar to stm_become_inevitable(), but additionally suspend all
From noreply at buildbot.pypy.org Tue Aug 19 19:05:57 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 19 Aug 2014 19:05:57 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: Use stm_call_on_commit() to delay the
raw free().
Message-ID: <20140819170557.88F151C14FF@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7
Changeset: r72913:0febb0d1b4e7
Date: 2014-08-19 19:05 +0200
http://bitbucket.org/pypy/pypy/changeset/0febb0d1b4e7/
Log: Use stm_call_on_commit() to delay the raw free().
diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c
--- a/rpython/translator/c/src/mem.c
+++ b/rpython/translator/c/src/mem.c
@@ -12,7 +12,7 @@
# else
# define try_pypy_debug_alloc_stop(p) /* nothing */
# endif
-void _pypy_stm_free(void *ptr)
+void _pypy_stm_cb_free(void *ptr)
{
/* This is called by src_stm/et.c when the transaction is aborted
and the 'ptr' was malloced but not freed. We have first to
@@ -24,6 +24,22 @@
PyObject_Free(ptr);
COUNT_FREE;
}
+void _pypy_stm_op_free(void *ptr)
+{
+ /* Called when RPython code contains OP_FREE or OP_RAW_FREE.
+ */
+ if (stm_call_on_abort(&stm_thread_local, ptr, NULL) == 0) {
+ /* There is a running non-inevitable transaction, but the object
+ was not registered during it, which means that it was created
+ before. In this case, we cannot immediately free it, but
+ only when a commit follows. */
+ stm_call_on_commit(&stm_thread_local, ptr, _pypy_stm_cb_free);
+ }
+ else {
+ /* In all other cases, free the object immediately. */
+ _pypy_stm_cb_free(ptr);
+ }
+}
#endif
diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h
--- a/rpython/translator/c/src/mem.h
+++ b/rpython/translator/c/src/mem.h
@@ -16,13 +16,14 @@
#ifdef RPY_STM
-void _pypy_stm_free(void *);
+void _pypy_stm_cb_free(void *);
+void _pypy_stm_op_free(void *);
#define _OP_RAW_MALLOCED(r) stm_call_on_abort(&stm_thread_local, r, \
- _pypy_stm_free)
-#define _OP_RAW_STM_UNREGISTER(r) stm_call_on_abort(&stm_thread_local, r, NULL)
+ _pypy_stm_cb_free)
+#define OP_FREE(p) _pypy_stm_op_free(p)
#else
-#define _OP_RAW_MALLOCED(r) /* nothing */
-#define _OP_RAW_STM_UNREGISTER(r) /* nothing */
+#define _OP_RAW_MALLOCED(r) /* nothing */
+#define OP_FREE(p) PyObject_Free(p); COUNT_FREE
#endif
@@ -34,8 +35,7 @@
} \
}
-#define OP_RAW_FREE(p, r) PyObject_Free(p); COUNT_FREE; \
- _OP_RAW_STM_UNREGISTER(p);
+#define OP_RAW_FREE(p, r) OP_FREE(p)
#define OP_RAW_MEMCLEAR(p, size, r) memset((void*)p, 0, size)
@@ -54,8 +54,6 @@
/************************************************************/
-#define OP_FREE(p) OP_RAW_FREE(p, do_not_use)
-
#ifndef COUNT_OP_MALLOCS
#define COUNT_MALLOC /* nothing */
@@ -87,7 +85,11 @@
# define OP_TRACK_ALLOC_START(addr, r) pypy_debug_alloc_start(addr, \
__FUNCTION__)
-# define OP_TRACK_ALLOC_STOP(addr, r) pypy_debug_alloc_stop(addr)
+# ifdef RPY_STM
+# define OP_TRACK_ALLOC_STOP(addr, r) /* nothing */
+# else
+# define OP_TRACK_ALLOC_STOP(addr, r) pypy_debug_alloc_stop(addr)
+# endif
void pypy_debug_alloc_start(void*, const char*);
void pypy_debug_alloc_stop(void*);
diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py
--- a/rpython/translator/stm/inevitable.py
+++ b/rpython/translator/stm/inevitable.py
@@ -96,7 +96,7 @@
if op.opname in MALLOCS:
return False
if op.opname in FREES:
- return True
+ return False
#
# Function calls
if op.opname == 'direct_call' or op.opname == 'indirect_call':
From noreply at buildbot.pypy.org Tue Aug 19 21:15:55 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 21:15:55 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: reworked comments in
incminimark
Message-ID: <20140819191555.DCB621C3CB0@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72914:93b928f5cf40
Date: 2014-08-19 21:04 +0200
http://bitbucket.org/pypy/pypy/changeset/93b928f5cf40/
Log: reworked comments in incminimark
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -71,15 +71,15 @@
# * young objects: allocated in the nursery if they are not too large, or
# raw-malloced otherwise. The nursery is a fixed-size memory buffer of
# 4MB by default. When full, we do a minor collection;
-# the surviving objects from the nursery are moved outside, and the
-# non-surviving raw-malloced objects are freed. All surviving objects
-# become old.
+# - surviving objects from the nursery are moved outside and become old,
+# - non-surviving raw-malloced objects are freed,
+# - and pinned objects are kept at their place inside the nursery and stay
+# young.
#
# * old objects: never move again. These objects are either allocated by
# minimarkpage.py (if they are small), or raw-malloced (if they are not
# small). Collected by regular mark-n-sweep during major collections.
#
-# XXX update doc string to contain object pinning (groggi)
WORD = LONG_BIT // 8
@@ -132,8 +132,8 @@
# a minor collection.
GCFLAG_VISITED_RMY = first_gcflag << 8
-# The following flag is set on nursery objects of which we expect not to
-# move. This means that a young object with this flag is not moved out
+# The following flag is set on nursery objects to keep them in the nursery.
+# This means that a young object with this flag is not moved out
# of the nursery during a minor collection. See pin()/unpin() for further
# details.
GCFLAG_PINNED = first_gcflag << 9
@@ -264,7 +264,7 @@
# nursery. Has to fit at least one large object
"nursery_cleanup": 32768 * WORD,
- # Number of objects that are allowed to be pinned in the nursery
+ # Number of objects that are allowed to be pinned in the nursery
# at the same time. Must be lesser than or equal to the chunk size
# of an AddressStack.
"max_number_of_pinned_objects": 100,
@@ -379,21 +379,19 @@
# minor collection.
self.nursery_objects_shadows = self.AddressDict()
#
- # A sorted deque containing all pinned objects *before* the last
- # minor collection. This deque must be consulted when considering
- # next nursery ceiling.
+ # A sorted deque containing addresses of pinned objects.
+ # This collection is used to make sure we don't overwrite pinned objects.
+ # Each minor collection creates a new deque containing the active pinned
+ # objects. The addresses are used to set the next 'nursery_top'.
self.nursery_barriers = self.AddressDeque()
#
# Counter tracking how many pinned objects currently reside inside
# the nursery.
self.pinned_objects_in_nursery = 0
#
- # Keeps track of objects pointing to pinned objects. These objects
- # must be revisited every minor collection. Without this list
- # any old object inside this list would only be visited in case a
- # write barrier was triggered, which would result in not visiting
- # the young pinned object and would therefore result in removing
- # the pinned object.
+ # Keeps track of old objects pointing to pinned objects. These objects
+ # must be traced every minor collection. Without tracing them the
+ # referenced pinned object wouldn't be visited and therefore collected.
self.old_objects_pointing_to_pinned = self.AddressStack()
#
# Allocate a nursery. In case of auto_nursery_size, start by
@@ -708,8 +706,9 @@
is needed."""
# in general we always move 'self.nursery_top' by 'self.nursery_cleanup'.
- # However, because of the presence of pinned objects there are cases where
- # the GC can't move by 'self.nursery_cleanup' without overflowing the arena.
+ # However, because of the presence of pinned objects there are cases
+ # where the GC can't move by 'self.nursery_cleanup' without overflowing
+ # the arena.
# For such a case we use the space left in the nursery.
size = min(self.nursery_cleanup, self.nursery_real_top - self.nursery_top)
if llmemory.raw_malloc_usage(totalsize) > size:
@@ -749,14 +748,14 @@
self.nursery_free = self.nursery_top + pinned_obj_size
self.nursery_top = self.nursery_barriers.popleft()
#
- # because we encountered a barrier, we also have to fix
- # 'prev_result' as the one provided as a method parameter
- # can't be used as there is no space between 'prev_result'
- # and the barrier for 'totalsize'.
+ # because we encountered a barrier, we have to fix 'prev_result'.
+ # The one provided as parameter can't be used further as there
+ # is not enough space between 'prev_result' and the barrier
+ # for an object of 'totalsize' size.
prev_result = self.nursery_free
else:
#
- # no barriers (i.e. pinned objects) after 'nursery_free'.
+ # no barriers (i.e. no pinned objects) after 'nursery_free'.
# If possible just enlarge the used part of the nursery.
# Otherwise we are forced to clean up the nursery.
if self.try_move_nursery_top(totalsize):
@@ -783,8 +782,8 @@
#
if self.nursery_free + totalsize > self.nursery_real_top:
# still not enough space, we need to collect.
- # maybe nursery contains too many pinned objects (see
- # assert below).
+ # maybe nursery contains too many pinned objects
+ # (see assert below).
self.minor_collection()
else:
# execute loop one more time. This should find
@@ -803,7 +802,6 @@
break
#
if self.debug_tiny_nursery >= 0: # for debugging
- # XXX solution for this assert? (groggi)
ll_assert(not self.nursery_barriers.non_empty(),
"no support for nursery debug and pinning")
if self.nursery_top - self.nursery_free > self.debug_tiny_nursery:
@@ -983,19 +981,18 @@
if self.pinned_objects_in_nursery >= self.max_number_of_pinned_objects:
return False
if not self.is_in_nursery(obj):
- # Old objects are already non-moving, therefore pinning
+ # old objects are already non-moving, therefore pinning
# makes no sense. If you run into this case, you may forgot
- # to check if can_move(obj) already returns True in which
- # case a call to pin() is unnecessary.
+ # to check can_move(obj).
return False
if self.has_gcptr(self.get_type_id(obj)):
# objects containing GC pointers can't be pinned. If we would add
# it, we would have to track all pinned objects and trace them
# every minor collection to make sure the referenced object are
- # kept alive.
+ # kept alive. Right now this is not a use case that's needed.
return False
if self._is_pinned(obj):
- # Already pinned, we do not allow to pin it again.
+ # already pinned, we do not allow to pin it again.
# Reason: It would be possible that the first caller unpins
# while the second caller thinks it's still pinned.
return False
@@ -1193,9 +1190,6 @@
else:
ll_assert(self.is_in_nursery(obj),
"pinned object not in nursery")
- # XXX check if we can support that or if it makes no sense (groggi)
- ll_assert(not self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS,
- "pinned nursery object with GCFLAG_TRACK_YOUNG_PTRS")
if self.gc_state == STATE_SCANNING:
self._debug_check_object_scanning(obj)
@@ -1544,9 +1538,9 @@
# where this stack is filled. Pinning an object only prevents it from
# being moved, not from being collected if it is not reachable anymore.
self.surviving_pinned_objects = self.AddressStack()
- #
- # The following counter keeps track of the amount of alive and pinned
- # objects inside the nursery.
+ # The following counter keeps track of alive and pinned young objects
+ # inside the nursery. We reset it here and increase it in
+ # '_trace_drag_out()'.
self.pinned_objects_in_nursery = 0
#
# Before everything else, remove from 'old_objects_pointing_to_young'
@@ -1578,14 +1572,12 @@
# with pinned object that are (only) visible from an old
# object.
# Additionally we create a new list as it may be that an old object
- # no longer points to a pinned one and we want them to remove from
- # the list.
+ # no longer points to a pinned one. Such old objects won't be added
+ # again to 'old_objects_pointing_to_pinned'.
if self.old_objects_pointing_to_pinned.non_empty():
current_old_objects_pointing_to_pinned = \
self.old_objects_pointing_to_pinned
- #
self.old_objects_pointing_to_pinned = self.AddressStack()
- # visit the ones we know of
current_old_objects_pointing_to_pinned.foreach(
self._visit_old_objects_pointing_to_pinned, None)
current_old_objects_pointing_to_pinned.delete()
@@ -1639,7 +1631,7 @@
self.free_young_rawmalloced_objects()
#
# All live nursery objects are out of the nursery or pinned inside
- # the nursery. Create nursery barriers to protect the pinned object,
+ # the nursery. Create nursery barriers to protect the pinned objects,
# fill the rest of the nursery with zeros and reset the current nursery
# pointer.
size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -1673,10 +1665,11 @@
llarena.arena_reset(prev, self.nursery_real_top - prev, 0)
#
# We assume that there are only a few pinned objects. Therefore, if there
- # is 'self.nursery_cleanup' space between the nursery's start ('self.nursery')
- # and the last pinned object ('prev'), we conclude that there is enough zeroed
- # space inside the arena to use for new allocation. Otherwise we fill
- # the nursery with zeros for 'self.nursery_cleanup' of space.
+ # is 'self.nursery_cleanup' space between the nursery's start
+ # ('self.nursery') and the last pinned object ('prev'), we conclude that
+ # there is enough zeroed space inside the arena to use for new
+ # allocation. Otherwise we fill the nursery with zeros for
+ # 'self.nursery_cleanup' of space.
if prev - self.nursery >= self.nursery_cleanup:
nursery_barriers.append(prev)
else:
@@ -1884,21 +1877,25 @@
#
elif self._is_pinned(obj):
hdr = self.header(obj)
- # track parent of pinned object specially
+ #
+ # track parent of pinned object specially. This must be done before
+ # checking for GCFLAG_VISITED: it may be that the same pinned object
+ # is reachable from multiple sources (e.g. two old objects pointing
+ # to the same pinned object). In such a case we need all parents
+ # of the pinned object in the list. Otherwise the pinned object could
+ # become dead and be removed just because the first parent of it
+ # is dead and collected.
if parent != llmemory.NULL and \
not self.header(parent).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN:
#
self.old_objects_pointing_to_pinned.append(parent)
self.header(parent).tid |= GCFLAG_PINNED
-
+ #
if hdr.tid & GCFLAG_VISITED:
- # already visited and keeping track of the object
return
+ #
hdr.tid |= GCFLAG_VISITED
#
- # XXX add additional checks for unsupported pinned objects (groggi)
- ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS,
- "pinned object with GCFLAG_HAS_CARDS not supported")
self.surviving_pinned_objects.append(
llarena.getfakearenaaddress(obj - size_gc_header))
self.pinned_objects_in_nursery += 1
@@ -2124,7 +2121,7 @@
# Light finalizers
if self.old_objects_with_light_finalizers.non_empty():
self.deal_with_old_objects_with_finalizers()
- #objects_to_trace processed fully, can move on to sweeping
+ # objects_to_trace processed fully, can move on to sweeping
self.ac.mass_free_prepare()
self.start_free_rawmalloc_objects()
#
@@ -2136,7 +2133,8 @@
self._sweep_old_objects_pointing_to_pinned,
new_old_objects_pointing_to_pinned)
self.old_objects_pointing_to_pinned.delete()
- self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned
+ self.old_objects_pointing_to_pinned = \
+ new_old_objects_pointing_to_pinned
self.gc_state = STATE_SWEEPING
#END MARKING
elif self.gc_state == STATE_SWEEPING:
@@ -2333,9 +2331,10 @@
# flag set, then the object should be in 'prebuilt_root_objects',
# and the GCFLAG_VISITED will be reset at the end of the
# collection.
+ # Objects with GCFLAG_PINNED can't have gcptrs (see pin()), they can be
+ # ignored.
hdr = self.header(obj)
if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS | GCFLAG_PINNED):
- # XXX ^^^ update doc in any way because of GCFLAG_PINNED addition? (groggi)
return 0
#
# It's the first time. We set the flag VISITED. The trick is
From noreply at buildbot.pypy.org Tue Aug 19 21:15:57 2014
From: noreply at buildbot.pypy.org (groggi)
Date: Tue, 19 Aug 2014 21:15:57 +0200 (CEST)
Subject: [pypy-commit] pypy gc-incminimark-pinning: remove outdated comments
Message-ID: <20140819191557.1E6831C3CB0@cobra.cs.uni-duesseldorf.de>
Author: Gregor Wegberg
Branch: gc-incminimark-pinning
Changeset: r72915:d704858f1a7d
Date: 2014-08-19 21:11 +0200
http://bitbucket.org/pypy/pypy/changeset/d704858f1a7d/
Log: remove outdated comments
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -20,7 +20,6 @@
# for test purposes we allow objects to be pinned and use
# the following list to keep track of the pinned objects
-# XXX think about possible unexpected behavior (groggi)
if not we_are_translated():
pinned_objects = []
diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py
--- a/rpython/rtyper/lltypesystem/rffi.py
+++ b/rpython/rtyper/lltypesystem/rffi.py
@@ -803,14 +803,6 @@
"""
Either free a non-moving buffer or keep the original storage alive.
"""
- # We cannot rely on rgc.can_move(data) here, because its result
- # might have changed since get_nonmovingbuffer(). Instead we check
- # if 'buf' points inside 'data'. This is only possible if we
- # followed the 2nd case in get_nonmovingbuffer(); in the first case,
- # 'buf' points to its own raw-malloced memory.
- # XXX fix comment (groggi)
-
-
if is_pinned:
rgc.unpin(data)
if is_raw:
From noreply at buildbot.pypy.org Tue Aug 19 22:14:05 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Tue, 19 Aug 2014 22:14:05 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3-fixes2: remove time.accept2dyear (removed
in python 3.3)
Message-ID: <20140819201405.2EE2C1C14FF@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3-fixes2
Changeset: r72916:df851de79c11
Date: 2014-08-19 20:20 +0200
http://bitbucket.org/pypy/pypy/changeset/df851de79c11/
Log: remove time.accept2dyear (removed in python 3.3)
diff --git a/pypy/module/rctime/__init__.py b/pypy/module/rctime/__init__.py
--- a/pypy/module/rctime/__init__.py
+++ b/pypy/module/rctime/__init__.py
@@ -39,5 +39,3 @@
from pypy.module.rctime import interp_time
interp_time._init_timezone(space)
- interp_time._init_accept2dyear(space)
-
diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py
--- a/pypy/module/rctime/interp_time.py
+++ b/pypy/module/rctime/interp_time.py
@@ -198,13 +198,6 @@
c_strftime = external('strftime', [rffi.CCHARP, rffi.SIZE_T, rffi.CCHARP, TM_P],
rffi.SIZE_T)
-def _init_accept2dyear(space):
- if os.environ.get("PYTHONY2K"):
- accept2dyear = 0
- else:
- accept2dyear = 1
- _set_module_object(space, "accept2dyear", space.wrap(accept2dyear))
-
def _init_timezone(space):
timezone = daylight = altzone = 0
tzname = ["", ""]
@@ -435,21 +428,6 @@
glob_buf.c_tm_zone = lltype.nullptr(rffi.CCHARP.TO)
rffi.setintfield(glob_buf, 'c_tm_gmtoff', 0)
- if y < 1000:
- w_accept2dyear = _get_module_object(space, "accept2dyear")
- accept2dyear = space.is_true(w_accept2dyear)
-
- if accept2dyear:
- if 69 <= y <= 99:
- y += 1900
- elif 0 <= y <= 68:
- y += 2000
- else:
- raise OperationError(space.w_ValueError,
- space.wrap("year out of range"))
- space.warn(space.wrap("Century info guessed for a 2-digit year."),
- space.w_DeprecationWarning)
-
# tm_wday does not need checking of its upper-bound since taking "%
# 7" in _gettmarg() automatically restricts the range.
if rffi.getintfield(glob_buf, 'c_tm_wday') < -1:
diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py
--- a/pypy/module/rctime/test/test_rctime.py
+++ b/pypy/module/rctime/test/test_rctime.py
@@ -5,7 +5,6 @@
def test_attributes(self):
import time as rctime
- assert isinstance(rctime.accept2dyear, int)
assert isinstance(rctime.altzone, int)
assert isinstance(rctime.daylight, int)
assert isinstance(rctime.timezone, int)
@@ -101,22 +100,16 @@
res = rctime.mktime(rctime.localtime())
assert isinstance(res, float)
+ # year cannot be -1
ltime = rctime.localtime()
- rctime.accept2dyear == 0
ltime = list(ltime)
ltime[0] = -1
- raises(ValueError, rctime.mktime, tuple(ltime))
- rctime.accept2dyear == 1
+ raises(OverflowError, rctime.mktime, tuple(ltime))
- ltime = list(ltime)
- ltime[0] = 67
- ltime = tuple(ltime)
- if os.name != "nt" and sys.maxsize < 1<<32: # time_t may be 64bit
- raises(OverflowError, rctime.mktime, ltime)
-
+ # year cannot be 100
ltime = list(ltime)
ltime[0] = 100
- raises(ValueError, rctime.mktime, tuple(ltime))
+ raises(OverflowError, rctime.mktime, tuple(ltime))
t = rctime.time()
assert int(rctime.mktime(rctime.localtime(t))) == int(t)
@@ -169,28 +162,6 @@
assert asc[-len(str(bigyear)):] == str(bigyear)
raises(OverflowError, rctime.asctime, (bigyear + 1,) + (0,)*8)
- def test_accept2dyear_access(self):
- import time as rctime
-
- accept2dyear = rctime.accept2dyear
- del rctime.accept2dyear
- try:
- # with year >= 1900 this shouldn't need to access accept2dyear
- assert rctime.asctime((2000,) + (0,) * 8).split()[-1] == '2000'
- finally:
- rctime.accept2dyear = accept2dyear
-
- def test_accept2dyear_bad(self):
- import time as rctime
- class X:
- def __bool__(self):
- raise RuntimeError('boo')
- orig, rctime.accept2dyear = rctime.accept2dyear, X()
- try:
- raises(RuntimeError, rctime.asctime, (200,) + (0,) * 8)
- finally:
- rctime.accept2dyear = orig
-
def test_struct_time(self):
import time as rctime
raises(TypeError, rctime.struct_time)
@@ -281,7 +252,7 @@
raises(TypeError, rctime.strftime, ())
raises(TypeError, rctime.strftime, (1,))
raises(TypeError, rctime.strftime, range(8))
- exp = '2000 01 01 00 00 00 1 001'
+ exp = '0 01 01 00 00 00 1 001'
assert rctime.strftime("%Y %m %d %H %M %S %w %j", (0,)*9) == exp
# Guard against invalid/non-supported format string
@@ -296,6 +267,23 @@
else:
assert rctime.strftime('%f') == '%f'
+ def test_strftime_y2k(self):
+ '''Port of cpython's datetimetester.test_strftime_y2k.'''
+ import time as rctime
+
+ ltime = list(rctime.gmtime())
+ for y in (1, 49, 70, 99, 100, 999, 1000, 1970):
+ ltime[0] = y
+
+ def fmt(template):
+ return rctime.strftime(template, tuple(ltime))
+
+ if fmt('%Y') != '%04d' % y:
+ # Year 42 returns '42', not padded
+ assert fmt("%Y") == '%d' % y
+ # '0042' is obtained anyway
+ assert fmt("%4Y") == '%04d' % y
+
def test_strftime_ext(self):
import time as rctime
@@ -314,9 +302,6 @@
# of the time tuple.
# check year
- if rctime.accept2dyear:
- raises(ValueError, rctime.strftime, '', (-1, 1, 1, 0, 0, 0, 0, 1, -1))
- raises(ValueError, rctime.strftime, '', (100, 1, 1, 0, 0, 0, 0, 1, -1))
rctime.strftime('', (1899, 1, 1, 0, 0, 0, 0, 1, -1))
rctime.strftime('', (0, 1, 1, 0, 0, 0, 0, 1, -1))
From noreply at buildbot.pypy.org Tue Aug 19 22:14:06 2014
From: noreply at buildbot.pypy.org (numerodix)
Date: Tue, 19 Aug 2014 22:14:06 +0200 (CEST)
Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2
(pull request #271)
Message-ID: <20140819201406.9D6961C14FF@cobra.cs.uni-duesseldorf.de>
Author: Martin Matusiak
Branch: py3.3
Changeset: r72917:6fcd3a9ba6e2
Date: 2014-08-19 22:13 +0200
http://bitbucket.org/pypy/pypy/changeset/6fcd3a9ba6e2/
Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #271)
remove time.accept2dyear (removed in python 3.3)
diff --git a/pypy/module/rctime/__init__.py b/pypy/module/rctime/__init__.py
--- a/pypy/module/rctime/__init__.py
+++ b/pypy/module/rctime/__init__.py
@@ -39,5 +39,3 @@
from pypy.module.rctime import interp_time
interp_time._init_timezone(space)
- interp_time._init_accept2dyear(space)
-
diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py
--- a/pypy/module/rctime/interp_time.py
+++ b/pypy/module/rctime/interp_time.py
@@ -198,13 +198,6 @@
c_strftime = external('strftime', [rffi.CCHARP, rffi.SIZE_T, rffi.CCHARP, TM_P],
rffi.SIZE_T)
-def _init_accept2dyear(space):
- if os.environ.get("PYTHONY2K"):
- accept2dyear = 0
- else:
- accept2dyear = 1
- _set_module_object(space, "accept2dyear", space.wrap(accept2dyear))
-
def _init_timezone(space):
timezone = daylight = altzone = 0
tzname = ["", ""]
@@ -435,21 +428,6 @@
glob_buf.c_tm_zone = lltype.nullptr(rffi.CCHARP.TO)
rffi.setintfield(glob_buf, 'c_tm_gmtoff', 0)
- if y < 1000:
- w_accept2dyear = _get_module_object(space, "accept2dyear")
- accept2dyear = space.is_true(w_accept2dyear)
-
- if accept2dyear:
- if 69 <= y <= 99:
- y += 1900
- elif 0 <= y <= 68:
- y += 2000
- else:
- raise OperationError(space.w_ValueError,
- space.wrap("year out of range"))
- space.warn(space.wrap("Century info guessed for a 2-digit year."),
- space.w_DeprecationWarning)
-
# tm_wday does not need checking of its upper-bound since taking "%
# 7" in _gettmarg() automatically restricts the range.
if rffi.getintfield(glob_buf, 'c_tm_wday') < -1:
diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py
--- a/pypy/module/rctime/test/test_rctime.py
+++ b/pypy/module/rctime/test/test_rctime.py
@@ -5,7 +5,6 @@
def test_attributes(self):
import time as rctime
- assert isinstance(rctime.accept2dyear, int)
assert isinstance(rctime.altzone, int)
assert isinstance(rctime.daylight, int)
assert isinstance(rctime.timezone, int)
@@ -101,22 +100,16 @@
res = rctime.mktime(rctime.localtime())
assert isinstance(res, float)
+ # year cannot be -1
ltime = rctime.localtime()
- rctime.accept2dyear == 0
ltime = list(ltime)
ltime[0] = -1
- raises(ValueError, rctime.mktime, tuple(ltime))
- rctime.accept2dyear == 1
+ raises(OverflowError, rctime.mktime, tuple(ltime))
- ltime = list(ltime)
- ltime[0] = 67
- ltime = tuple(ltime)
- if os.name != "nt" and sys.maxsize < 1<<32: # time_t may be 64bit
- raises(OverflowError, rctime.mktime, ltime)
-
+ # year cannot be 100
ltime = list(ltime)
ltime[0] = 100
- raises(ValueError, rctime.mktime, tuple(ltime))
+ raises(OverflowError, rctime.mktime, tuple(ltime))
t = rctime.time()
assert int(rctime.mktime(rctime.localtime(t))) == int(t)
@@ -169,28 +162,6 @@
assert asc[-len(str(bigyear)):] == str(bigyear)
raises(OverflowError, rctime.asctime, (bigyear + 1,) + (0,)*8)
- def test_accept2dyear_access(self):
- import time as rctime
-
- accept2dyear = rctime.accept2dyear
- del rctime.accept2dyear
- try:
- # with year >= 1900 this shouldn't need to access accept2dyear
- assert rctime.asctime((2000,) + (0,) * 8).split()[-1] == '2000'
- finally:
- rctime.accept2dyear = accept2dyear
-
- def test_accept2dyear_bad(self):
- import time as rctime
- class X:
- def __bool__(self):
- raise RuntimeError('boo')
- orig, rctime.accept2dyear = rctime.accept2dyear, X()
- try:
- raises(RuntimeError, rctime.asctime, (200,) + (0,) * 8)
- finally:
- rctime.accept2dyear = orig
-
def test_struct_time(self):
import time as rctime
raises(TypeError, rctime.struct_time)
@@ -281,7 +252,7 @@
raises(TypeError, rctime.strftime, ())
raises(TypeError, rctime.strftime, (1,))
raises(TypeError, rctime.strftime, range(8))
- exp = '2000 01 01 00 00 00 1 001'
+ exp = '0 01 01 00 00 00 1 001'
assert rctime.strftime("%Y %m %d %H %M %S %w %j", (0,)*9) == exp
# Guard against invalid/non-supported format string
@@ -296,6 +267,23 @@
else:
assert rctime.strftime('%f') == '%f'
+ def test_strftime_y2k(self):
+ '''Port of cpython's datetimetester.test_strftime_y2k.'''
+ import time as rctime
+
+ ltime = list(rctime.gmtime())
+ for y in (1, 49, 70, 99, 100, 999, 1000, 1970):
+ ltime[0] = y
+
+ def fmt(template):
+ return rctime.strftime(template, tuple(ltime))
+
+ if fmt('%Y') != '%04d' % y:
+ # Year 42 returns '42', not padded
+ assert fmt("%Y") == '%d' % y
+ # '0042' is obtained anyway
+ assert fmt("%4Y") == '%04d' % y
+
def test_strftime_ext(self):
import time as rctime
@@ -314,9 +302,6 @@
# of the time tuple.
# check year
- if rctime.accept2dyear:
- raises(ValueError, rctime.strftime, '', (-1, 1, 1, 0, 0, 0, 0, 1, -1))
- raises(ValueError, rctime.strftime, '', (100, 1, 1, 0, 0, 0, 0, 1, -1))
rctime.strftime('', (1899, 1, 1, 0, 0, 0, 0, 1, -1))
rctime.strftime('', (0, 1, 1, 0, 0, 0, 0, 1, -1))
From noreply at buildbot.pypy.org Wed Aug 20 09:32:11 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 09:32:11 +0200 (CEST)
Subject: [pypy-commit] pypy default: Issue #1849: fix for str.split(),
str.rsplit()
Message-ID: <20140820073211.38EA41C1482@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72918:b9445a658af8
Date: 2014-08-20 09:31 +0200
http://bitbucket.org/pypy/pypy/changeset/b9445a658af8/
Log: Issue #1849: fix for str.split(), str.rsplit()
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -731,6 +731,22 @@
return space.wrap(self._val(space).join(l))
return self._StringMethods_descr_join(space, w_list)
+ _StringMethods_descr_split = descr_split
+ @unwrap_spec(maxsplit=int)
+ def descr_split(self, space, w_sep=None, maxsplit=-1):
+ if w_sep is not None and space.isinstance_w(w_sep, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_split(space, w_sep, maxsplit)
+ return self._StringMethods_descr_split(space, w_sep, maxsplit)
+
+ _StringMethods_descr_rsplit = descr_rsplit
+ @unwrap_spec(maxsplit=int)
+ def descr_rsplit(self, space, w_sep=None, maxsplit=-1):
+ if w_sep is not None and space.isinstance_w(w_sep, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_rsplit(space, w_sep, maxsplit)
+ return self._StringMethods_descr_rsplit(space, w_sep, maxsplit)
+
def _join_return_one(self, space, w_obj):
return (space.is_w(space.type(w_obj), space.w_str) or
space.is_w(space.type(w_obj), space.w_unicode))
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -175,6 +175,24 @@
assert u' a b c '.rsplit(None, 0) == [u' a b c']
assert u''.rsplit('aaa') == [u'']
+ def test_split_rsplit_str_unicode(self):
+ x = 'abc'.split(u'b')
+ assert x == [u'a', u'c']
+ assert map(type, x) == [unicode, unicode]
+ x = 'abc'.rsplit(u'b')
+ assert x == [u'a', u'c']
+ assert map(type, x) == [unicode, unicode]
+ x = 'abc'.split(u'\u4321')
+ assert x == [u'abc']
+ assert map(type, x) == [unicode]
+ x = 'abc'.rsplit(u'\u4321')
+ assert x == [u'abc']
+ assert map(type, x) == [unicode]
+ raises(UnicodeDecodeError, '\x80'.split, u'a')
+ raises(UnicodeDecodeError, '\x80'.split, u'')
+ raises(UnicodeDecodeError, '\x80'.rsplit, u'a')
+ raises(UnicodeDecodeError, '\x80'.rsplit, u'')
+
def test_center(self):
s=u"a b"
assert s.center(0) == u"a b"
From noreply at buildbot.pypy.org Wed Aug 20 09:54:00 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 09:54:00 +0200 (CEST)
Subject: [pypy-commit] pypy default: str.strip(), str.lstrip(), str.rstrip()
Message-ID: <20140820075400.53C421C11B8@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72919:0ff8cabf7240
Date: 2014-08-20 09:36 +0200
http://bitbucket.org/pypy/pypy/changeset/0ff8cabf7240/
Log: str.strip(), str.lstrip(), str.rstrip()
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -747,6 +747,27 @@
return self_as_uni.descr_rsplit(space, w_sep, maxsplit)
return self._StringMethods_descr_rsplit(space, w_sep, maxsplit)
+ _StringMethods_descr_strip = descr_strip
+ def descr_strip(self, space, w_chars=None):
+ if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_strip(space, w_chars)
+ return self._StringMethods_descr_strip(space, w_chars)
+
+ _StringMethods_descr_lstrip = descr_lstrip
+ def descr_lstrip(self, space, w_chars=None):
+ if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_lstrip(space, w_chars)
+ return self._StringMethods_descr_lstrip(space, w_chars)
+
+ _StringMethods_descr_rstrip = descr_rstrip
+ def descr_rstrip(self, space, w_chars=None):
+ if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_rstrip(space, w_chars)
+ return self._StringMethods_descr_rstrip(space, w_chars)
+
def _join_return_one(self, space, w_obj):
return (space.is_w(space.type(w_obj), space.w_str) or
space.is_w(space.type(w_obj), space.w_unicode))
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -333,6 +333,17 @@
assert u'xyzzyhelloxyzzy'.lstrip('xyz') == u'helloxyzzy'
assert u'xyzzyhelloxyzzy'.rstrip(u'xyz') == u'xyzzyhello'
+ def test_strip_str_unicode(self):
+ x = "--abc--".strip(u"-")
+ assert (x, type(x)) == (u"abc", unicode)
+ x = "--abc--".lstrip(u"-")
+ assert (x, type(x)) == (u"abc--", unicode)
+ x = "--abc--".rstrip(u"-")
+ assert (x, type(x)) == (u"--abc", unicode)
+ raises(UnicodeDecodeError, "\x80".strip, u"")
+ raises(UnicodeDecodeError, "\x80".lstrip, u"")
+ raises(UnicodeDecodeError, "\x80".rstrip, u"")
+
def test_long_from_unicode(self):
assert long(u'12345678901234567890') == 12345678901234567890
assert int(u'12345678901234567890') == 12345678901234567890
From noreply at buildbot.pypy.org Wed Aug 20 09:54:01 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 09:54:01 +0200 (CEST)
Subject: [pypy-commit] pypy default: str.count()
Message-ID: <20140820075401.86A031C11B8@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72920:4533e4236ff1
Date: 2014-08-20 09:39 +0200
http://bitbucket.org/pypy/pypy/changeset/4533e4236ff1/
Log: str.count()
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -768,6 +768,13 @@
return self_as_uni.descr_rstrip(space, w_chars)
return self._StringMethods_descr_rstrip(space, w_chars)
+ _StringMethods_descr_count = descr_count
+ def descr_count(self, space, w_sub, w_start=None, w_end=None):
+ if space.isinstance_w(w_sub, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_count(space, w_sub, w_start, w_end)
+ return self._StringMethods_descr_count(space, w_sub, w_start, w_end)
+
def _join_return_one(self, space, w_obj):
return (space.is_w(space.type(w_obj), space.w_str) or
space.is_w(space.type(w_obj), space.w_unicode))
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -680,13 +680,23 @@
assert u"".count(u"") ==1
assert u"Python".count(u"") ==7
assert u"ab aaba".count(u"ab") ==2
- assert 'aaa'.count('a') == 3
- assert 'aaa'.count('b') == 0
- assert 'aaa'.count('a', -1) == 1
- assert 'aaa'.count('a', -10) == 3
- assert 'aaa'.count('a', 0, -1) == 2
- assert 'aaa'.count('a', 0, -10) == 0
- assert 'ababa'.count('aba') == 1
+ assert u'aaa'.count(u'a') == 3
+ assert u'aaa'.count(u'b') == 0
+ assert u'aaa'.count(u'a', -1) == 1
+ assert u'aaa'.count(u'a', -10) == 3
+ assert u'aaa'.count(u'a', 0, -1) == 2
+ assert u'aaa'.count(u'a', 0, -10) == 0
+ assert u'ababa'.count(u'aba') == 1
+
+ def test_count_str_unicode(self):
+ assert 'aaa'.count(u'a') == 3
+ assert 'aaa'.count(u'b') == 0
+ assert 'aaa'.count(u'a', -1) == 1
+ assert 'aaa'.count(u'a', -10) == 3
+ assert 'aaa'.count(u'a', 0, -1) == 2
+ assert 'aaa'.count(u'a', 0, -10) == 0
+ assert 'ababa'.count(u'aba') == 1
+ raises(UnicodeDecodeError, '\x80'.count, u'')
def test_swapcase(self):
assert u'\xe4\xc4\xdf'.swapcase() == u'\xc4\xe4\xdf'
From noreply at buildbot.pypy.org Wed Aug 20 09:54:02 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 09:54:02 +0200 (CEST)
Subject: [pypy-commit] pypy default: str.find(), str.rfind(), str.index(),
str.rindex()
Message-ID: <20140820075402.CC7E01C11B8@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72921:27305d2b0ff9
Date: 2014-08-20 09:43 +0200
http://bitbucket.org/pypy/pypy/changeset/27305d2b0ff9/
Log: str.find(), str.rfind(), str.index(), str.rindex()
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -775,6 +775,34 @@
return self_as_uni.descr_count(space, w_sub, w_start, w_end)
return self._StringMethods_descr_count(space, w_sub, w_start, w_end)
+ _StringMethods_descr_find = descr_find
+ def descr_find(self, space, w_sub, w_start=None, w_end=None):
+ if space.isinstance_w(w_sub, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_find(space, w_sub, w_start, w_end)
+ return self._StringMethods_descr_find(space, w_sub, w_start, w_end)
+
+ _StringMethods_descr_rfind = descr_rfind
+ def descr_rfind(self, space, w_sub, w_start=None, w_end=None):
+ if space.isinstance_w(w_sub, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_rfind(space, w_sub, w_start, w_end)
+ return self._StringMethods_descr_rfind(space, w_sub, w_start, w_end)
+
+ _StringMethods_descr_index = descr_index
+ def descr_index(self, space, w_sub, w_start=None, w_end=None):
+ if space.isinstance_w(w_sub, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_index(space, w_sub, w_start, w_end)
+ return self._StringMethods_descr_index(space, w_sub, w_start, w_end)
+
+ _StringMethods_descr_rindex = descr_rindex
+ def descr_rindex(self, space, w_sub, w_start=None, w_end=None):
+ if space.isinstance_w(w_sub, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_rindex(space, w_sub, w_start, w_end)
+ return self._StringMethods_descr_rindex(space, w_sub, w_start, w_end)
+
def _join_return_one(self, space, w_obj):
return (space.is_w(space.type(w_obj), space.w_str) or
space.is_w(space.type(w_obj), space.w_unicode))
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -675,6 +675,16 @@
def test_rfind_corner_case(self):
assert u'abc'.rfind('', 4) == -1
+ def test_find_index_str_unicode(self):
+ assert 'abcdefghiabc'.find(u'bc') == 1
+ assert 'abcdefghiabc'.rfind(u'abc') == 9
+ raises(UnicodeDecodeError, '\x80'.find, u'')
+ raises(UnicodeDecodeError, '\x80'.rfind, u'')
+ assert 'abcdefghiabc'.index(u'bc') == 1
+ assert 'abcdefghiabc'.rindex(u'abc') == 9
+ raises(UnicodeDecodeError, '\x80'.index, u'')
+ raises(UnicodeDecodeError, '\x80'.rindex, u'')
+
def test_count(self):
assert u"".count(u"x") ==0
assert u"".count(u"") ==1
From noreply at buildbot.pypy.org Wed Aug 20 09:54:04 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 09:54:04 +0200 (CEST)
Subject: [pypy-commit] pypy default: str.replace()
Message-ID: <20140820075404.13D7F1C11B8@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72922:864226266f8c
Date: 2014-08-20 09:46 +0200
http://bitbucket.org/pypy/pypy/changeset/864226266f8c/
Log: str.replace()
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -707,19 +707,7 @@
new_is_unicode = space.isinstance_w(w_new, space.w_unicode)
if old_is_unicode or new_is_unicode:
self_as_uni = unicode_from_encoded_object(space, self, None, None)
- if not old_is_unicode:
- w_old = unicode_from_encoded_object(space, w_old, None, None)
- if not new_is_unicode:
- w_new = unicode_from_encoded_object(space, w_new, None, None)
- input = self_as_uni._val(space)
- sub = self_as_uni._op_val(space, w_old)
- by = self_as_uni._op_val(space, w_new)
- try:
- res = replace(input, sub, by, count)
- except OverflowError:
- raise oefmt(space.w_OverflowError,
- "replace string is too long")
- return self_as_uni._new(res)
+ return self_as_uni.descr_replace(space, w_old, w_new, count)
return self._StringMethods_descr_replace(space, w_old, w_new, count)
_StringMethods_descr_join = descr_join
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -956,10 +956,12 @@
assert not u'a'.isnumeric()
assert u'\u2460'.isnumeric() # CIRCLED DIGIT ONE
- def test_replace_autoconvert(self):
+ def test_replace_str_unicode(self):
res = 'one!two!three!'.replace(u'!', u'@', 1)
assert res == u'one at two!three!'
assert type(res) == unicode
+ raises(UnicodeDecodeError, '\x80'.replace, 'a', u'b')
+ raises(UnicodeDecodeError, '\x80'.replace, u'a', 'b')
def test_join_subclass(self):
class UnicodeSubclass(unicode):
From noreply at buildbot.pypy.org Wed Aug 20 09:54:05 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 09:54:05 +0200 (CEST)
Subject: [pypy-commit] pypy default: str.partition(), str.rpartition()
Message-ID: <20140820075405.5A9751C11B8@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72923:f7a311fdf0ac
Date: 2014-08-20 09:50 +0200
http://bitbucket.org/pypy/pypy/changeset/f7a311fdf0ac/
Log: str.partition(), str.rpartition()
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -791,6 +791,20 @@
return self_as_uni.descr_rindex(space, w_sub, w_start, w_end)
return self._StringMethods_descr_rindex(space, w_sub, w_start, w_end)
+ _StringMethods_descr_partition = descr_partition
+ def descr_partition(self, space, w_sub):
+ if space.isinstance_w(w_sub, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_partition(space, w_sub)
+ return self._StringMethods_descr_partition(space, w_sub)
+
+ _StringMethods_descr_rpartition = descr_rpartition
+ def descr_rpartition(self, space, w_sub):
+ if space.isinstance_w(w_sub, space.w_unicode):
+ self_as_uni = unicode_from_encoded_object(space, self, None, None)
+ return self_as_uni.descr_rpartition(space, w_sub)
+ return self._StringMethods_descr_rpartition(space, w_sub)
+
def _join_return_one(self, space, w_obj):
return (space.is_w(space.type(w_obj), space.w_str) or
space.is_w(space.type(w_obj), space.w_unicode))
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -628,6 +628,13 @@
raises(ValueError, S.rpartition, u'')
raises(TypeError, S.rpartition, None)
+ def test_partition_str_unicode(self):
+ x = 'abbbd'.rpartition(u'bb')
+ assert x == (u'ab', u'bb', u'd')
+ assert map(type, x) == [unicode, unicode, unicode]
+ raises(UnicodeDecodeError, '\x80'.partition, u'')
+ raises(UnicodeDecodeError, '\x80'.rpartition, u'')
+
def test_mul(self):
zero = 0
assert type(u'' * zero) == type(zero * u'') == unicode
From noreply at buildbot.pypy.org Wed Aug 20 12:35:37 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 12:35:37 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: Don't insert the dummy malloc between
the final GUARD_NOT_FORCED_2
Message-ID: <20140820103537.664F61C1489@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch: stmgc-c7
Changeset: r72924:dacc3c52da1f
Date: 2014-08-20 12:35 +0200
http://bitbucket.org/pypy/pypy/changeset/dacc3c52da1f/
Log: Don't insert the dummy malloc between the final GUARD_NOT_FORCED_2
and the following FINISH!
diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py
--- a/rpython/jit/backend/llsupport/stmrewrite.py
+++ b/rpython/jit/backend/llsupport/stmrewrite.py
@@ -27,6 +27,12 @@
self._do_stm_call('stm_hint_commit_soon', [], None,
op.stm_location)
return
+ # ---------- jump, finish, guard_not_forced_2 ----------
+ if (opnum == rop.JUMP or opnum == rop.FINISH
+ or opnum == rop.GUARD_NOT_FORCED_2):
+ self.add_dummy_allocation()
+ self.newops.append(op)
+ return
# ---------- pure operations, guards ----------
if op.is_always_pure() or op.is_guard() or op.is_ovf():
self.newops.append(op)
@@ -84,11 +90,6 @@
):
self.newops.append(op)
return
- # ---------- jump, finish ----------
- if opnum == rop.JUMP or opnum == rop.FINISH:
- self.add_dummy_allocation()
- self.newops.append(op)
- return
# ---------- fall-back ----------
# Check that none of the ops handled here can collect.
# This is not done by the fallback here
diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py
--- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py
@@ -1322,3 +1322,18 @@
$DUMMYALLOC
jump(i1)
""")
+
+ def test_dummy_alloc_is_before_guard_not_forced_2(self):
+ self.check_rewrite("""
+ []
+ escape()
+ guard_not_forced_2() []
+ finish()
+ """, """
+ []
+ $INEV
+ escape()
+ $DUMMYALLOC
+ guard_not_forced_2() []
+ finish()
+ """)
From noreply at buildbot.pypy.org Wed Aug 20 13:02:51 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Wed, 20 Aug 2014 13:02:51 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: port the write barrier placement code
from c4 to place read barriers in c7 more
Message-ID: <20140820110251.46D7D1C14FF@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72925:5f2375647996
Date: 2014-08-20 10:51 +0200
http://bitbucket.org/pypy/pypy/changeset/5f2375647996/
Log: port the write barrier placement code from c4 to place read barriers
in c7 more intelligently
diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py
--- a/rpython/translator/stm/breakfinder.py
+++ b/rpython/translator/stm/breakfinder.py
@@ -9,6 +9,7 @@
#'jit_assembler_call',
'stm_enter_callback_call',
'stm_leave_callback_call',
+ 'stm_transaction_break',
])
for tb in TRANSACTION_BREAK:
diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py
--- a/rpython/translator/stm/readbarrier.py
+++ b/rpython/translator/stm/readbarrier.py
@@ -1,33 +1,54 @@
from rpython.flowspace.model import SpaceOperation, Constant, Variable
-from rpython.translator.unsimplify import varoftype
+from rpython.translator.unsimplify import varoftype, insert_empty_block, insert_empty_startblock
from rpython.rtyper.lltypesystem import lltype
from rpython.translator.stm.support import is_immutable
+from rpython.translator.simplify import join_blocks
-
+MALLOCS = set([
+ 'malloc', 'malloc_varsize',
+ 'malloc_nonmovable', 'malloc_nonmovable_varsize',
+ ])
READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load'])
+
+
+def needs_barrier(frm, to):
+ return to > frm
+
def is_gc_ptr(T):
return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc'
+class Renaming(object):
+ def __init__(self, newvar, category):
+ self.newvar = newvar # a Variable or a Constant
+ self.TYPE = newvar.concretetype
+ self.category = category
-def insert_stm_read_barrier(transformer, graph):
- # We need to put enough 'stm_read' in the graph so that any
- # execution of a READ_OP on some GC object is guaranteed to also
- # execute either 'stm_read' or 'stm_write' on the same GC object
- # during the same transaction.
- #
- # XXX this can be optimized a lot, but for now we go with the
- # simplest possible solution...
- #
- gcremovetypeptr = transformer.translator.config.translation.gcremovetypeptr
- for block in graph.iterblocks():
- if not block.operations:
- continue
- newops = []
+
+class BlockTransformer(object):
+
+ def __init__(self, stmtransformer, block):
+ self.stmtransformer = stmtransformer
+ self.block = block
+ self.patch = None
+ self.inputargs_category = None
+ self.inputargs_category_per_link = {}
+
+ def init_start_block(self):
+ # all input args have category "any"
+ from_outside = ['A'] * len(self.block.inputargs)
+ self.inputargs_category_per_link[None] = from_outside
+ self.update_inputargs_category()
+
+
+ def analyze_inside_block(self, graph):
+ gcremovetypeptr = self.stmtransformer.translator.config.translation.gcremovetypeptr
+
+ wants_a_barrier = {}
stm_ignored = False
- for op in block.operations:
+ for op in self.block.operations:
is_getter = (op.opname in READ_OPS and
op.result.concretetype is not lltype.Void and
is_gc_ptr(op.args[0].concretetype))
@@ -38,25 +59,239 @@
# typeptr is always immutable
pass
elif ((op.opname in ('getarraysize', 'getinteriorarraysize') and
- is_gc_ptr(op.args[0].concretetype)) or
+ is_gc_ptr(op.args[0].concretetype)) or
(is_getter and is_immutable(op))):
# immutable getters
+ pass
+ elif is_getter:
+ if not stm_ignored:
+ wants_a_barrier[op] = 'R'
+ elif op.opname == 'weakref_deref':
# 'weakref_deref': kind of immutable, but the GC has to see
# which transactions read from a dying weakref, so we
# need the barrier nonetheless...
- pass
- elif is_getter:
- if not stm_ignored:
- v_none = varoftype(lltype.Void)
- newops.append(SpaceOperation('stm_read',
- [op.args[0]], v_none))
- transformer.read_barrier_counts += 1
+ wants_a_barrier[op] = 'R'
elif op.opname == 'stm_ignored_start':
- assert stm_ignored == False
+ assert not stm_ignored, "nested 'with stm_ignored'"
stm_ignored = True
elif op.opname == 'stm_ignored_stop':
- assert stm_ignored == True
+ assert stm_ignored, "stm_ignored_stop without start?"
stm_ignored = False
- newops.append(op)
- assert stm_ignored == False
- block.operations = newops
+
+ if stm_ignored and op in wants_a_barrier:
+ assert wants_a_barrier[op] == 'R'
+ if is_getter and is_gc_ptr(op.result.concretetype):
+ raise Exception(
+ "%r: 'with stm_ignored:' contains unsupported "
+ "operation %r reading a GC pointer" % (graph, op))
+ #
+ if stm_ignored:
+ raise Exception("%r: 'with stm_ignored:' code body too complex"
+ % (graph,))
+ self.wants_a_barrier = wants_a_barrier
+
+
+ def flow_through_block(self):
+
+ def renfetch(v):
+ try:
+ return renamings[v]
+ except KeyError:
+ ren = Renaming(v, 'A')
+ renamings[v] = ren
+ return ren
+
+ def get_category_or_null(v):
+ # 'v' is an original variable here, or a constant
+ if isinstance(v, Constant) and not v.value: # a NULL constant
+ return 'Z'
+ if v in renamings:
+ return renamings[v].category
+ if isinstance(v, Constant):
+ return 'R'
+ else:
+ return 'A'
+
+ def renamings_get(v):
+ try:
+ ren = renamings[v]
+ except KeyError:
+ return v # unmodified
+ v2 = ren.newvar
+ if v2.concretetype == v.concretetype:
+ return v2
+ v3 = varoftype(v.concretetype)
+ newoperations.append(SpaceOperation('cast_pointer', [v2], v3))
+ if lltype.castable(ren.TYPE, v3.concretetype) > 0:
+ ren.TYPE = v3.concretetype
+ return v3
+
+ # note: 'renamings' maps old vars to new vars, but cast_pointers
+ # are done lazily. It means that the two vars may not have
+ # exactly the same type.
+ renamings = {} # {original-var: Renaming(newvar, category)}
+ newoperations = []
+ stmtransformer = self.stmtransformer
+
+ # make the initial trivial renamings needed to have some precise
+ # categories for the input args
+ for v, cat in zip(self.block.inputargs, self.inputargs_category):
+ if is_gc_ptr(v.concretetype):
+ assert cat is not None
+ renamings[v] = Renaming(v, cat)
+
+ for op in self.block.operations:
+ #
+ if (op.opname in ('cast_pointer', 'same_as') and
+ is_gc_ptr(op.result.concretetype)):
+ renamings[op.result] = renfetch(op.args[0])
+ continue
+ #
+ to = self.wants_a_barrier.get(op)
+ if to is not None:
+ ren = renfetch(op.args[0])
+ frm = ren.category
+ if needs_barrier(frm, to):
+ stmtransformer.read_barrier_counts += 1
+ v_none = varoftype(lltype.Void)
+ newoperations.append(
+ SpaceOperation('stm_read', [ren.newvar], v_none))
+ ren.category = to
+ #
+ # XXX: from c4: we can probably just append the original op
+ newop = SpaceOperation(op.opname,
+ [renamings_get(v) for v in op.args],
+ op.result)
+ newoperations.append(newop)
+ #
+ if (stmtransformer.break_analyzer.analyze(op)
+ or op.opname == 'debug_stm_flush_barrier'):
+ # this operation can perform a transaction break:
+ # all pointers are lowered to 'A'
+ for ren in renamings.values():
+ ren.category = 'A'
+ #
+ if op.opname in MALLOCS:
+ assert op.result not in renamings
+ renamings[op.result] = Renaming(op.result, 'R')
+ #
+ if op.opname in ('setfield', 'setarrayitem', 'setinteriorfield',
+ 'raw_store'):
+ # compare with logic in stmframework.py
+ # ops that need a write barrier also make the var 'R'
+ if (op.args[-1].concretetype is not lltype.Void
+ and is_gc_ptr(op.args[0].concretetype)):
+ renfetch(op.args[0]).category = 'R'
+
+ if isinstance(self.block.exitswitch, Variable):
+ switchv = renamings_get(self.block.exitswitch)
+ else:
+ switchv = None
+ blockoperations = newoperations
+ linkoperations = []
+ for link in self.block.exits:
+ output_categories = []
+ for v in link.args:
+ if is_gc_ptr(v.concretetype):
+ cat = get_category_or_null(v)
+ else:
+ cat = None
+ output_categories.append(cat)
+ newoperations = []
+ newargs = [renamings_get(v) for v in link.args]
+ linkoperations.append((newargs, newoperations, output_categories))
+ #
+ # Record how we'd like to patch the block, but don't do any
+ # patching yet
+ self.patch = (blockoperations, switchv, linkoperations)
+
+
+ def update_targets(self, block_transformers):
+ (_, _, linkoperations) = self.patch
+ assert len(linkoperations) == len(self.block.exits)
+ targetbts = []
+ for link, (_, _, output_categories) in zip(self.block.exits,
+ linkoperations):
+ targetblock = link.target
+ if targetblock not in block_transformers:
+ continue # ignore the exit block
+ targetbt = block_transformers[targetblock]
+ targetbt.inputargs_category_per_link[link] = output_categories
+ if targetbt.update_inputargs_category():
+ targetbts.append(targetbt)
+ return set(targetbts)
+
+ def update_inputargs_category(self):
+ values = self.inputargs_category_per_link.values()
+ newcats = []
+ for i, v in enumerate(self.block.inputargs):
+ if is_gc_ptr(v.concretetype):
+ cats = [output_categories[i] for output_categories in values]
+ assert None not in cats
+ newcats.append(min(cats))
+ else:
+ newcats.append(None)
+ if newcats != self.inputargs_category:
+ self.inputargs_category = newcats
+ return True
+ else:
+ return False
+
+
+ def patch_now(self):
+ if self.patch is None:
+ return
+ newoperations, switchv, linkoperations = self.patch
+ self.block.operations = newoperations
+ if switchv is not None:
+ self.block.exitswitch = switchv
+ assert len(linkoperations) == len(self.block.exits)
+ for link, (newargs, newoperations, _) in zip(self.block.exits,
+ linkoperations):
+ link.args[:] = newargs
+ if newoperations:
+ # must put them in a fresh block along the link
+ annotator = self.stmtransformer.translator.annotator
+ newblock = insert_empty_block(annotator, link,
+ newoperations)
+
+
+def insert_stm_read_barrier(stmtransformer, graph):
+ """This function uses the following characters for 'categories':
+
+ * 'A': any general pointer
+ * 'R': the read (or write) barrier was applied
+ * 'Z': the null constant
+
+ The letters are chosen so that a barrier is needed to change a
+ pointer from category x to category y if and only if y > x.
+ """
+ # We need to put enough 'stm_read' in the graph so that any
+ # execution of a READ_OP on some GC object is guaranteed to also
+ # execute either 'stm_read' or 'stm_write' on the same GC object
+ # during the same transaction.
+
+ join_blocks(graph)
+ annotator = stmtransformer.translator.annotator
+ insert_empty_startblock(annotator, graph)
+
+ block_transformers = {}
+
+ for block in graph.iterblocks():
+ if block.operations == ():
+ continue
+ bt = BlockTransformer(stmtransformer, block)
+ bt.analyze_inside_block(graph)
+ block_transformers[block] = bt
+
+ bt = block_transformers[graph.startblock]
+ bt.init_start_block()
+ pending = set([bt])
+
+ while pending:
+ bt = pending.pop()
+ bt.flow_through_block()
+ pending |= bt.update_targets(block_transformers)
+
+ for bt in block_transformers.values():
+ bt.patch_now()
diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py
--- a/rpython/translator/stm/test/test_readbarrier.py
+++ b/rpython/translator/stm/test/test_readbarrier.py
@@ -1,6 +1,8 @@
from rpython.rlib.objectmodel import stm_ignored
from rpython.translator.stm.test.transform_support import BaseTestTransform
-from rpython.rtyper.lltypesystem import lltype
+from rpython.rlib.rstm import register_invoke_around_extcall
+from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem.lloperation import llop
class TestReadBarrier(BaseTestTransform):
@@ -26,6 +28,19 @@
assert res == 42
assert self.read_barriers == [x1]
+ def test_simple_read_after_write(self):
+ X = lltype.GcStruct('X', ('foo', lltype.Signed))
+ x1 = lltype.malloc(X, immortal=True)
+ x1.foo = 42
+
+ def f1(n):
+ x1.foo = 7 # write barrier will be done
+ return x1.foo
+
+ res = self.interpret(f1, [4])
+ assert res == 7
+ assert self.read_barriers == [] # implicitly by the write-barrier
+
def test_stm_ignored_read(self):
X = lltype.GcStruct('X', ('foo', lltype.Signed))
x1 = lltype.malloc(X, immortal=True)
@@ -48,3 +63,198 @@
res = self.interpret(f1, [2])
assert res == 42
assert self.read_barriers == [x1]
+
+ def test_array_size(self):
+ array_gc = lltype.GcArray(('z', lltype.Signed))
+ array_nongc = lltype.Array(('z', lltype.Signed))
+ Q = lltype.GcStruct('Q',
+ ('gc', lltype.Ptr(array_gc)),
+ ('raw', lltype.Ptr(array_nongc)))
+ q = lltype.malloc(Q, immortal=True)
+ q.gc = lltype.malloc(array_gc, n=3, flavor='gc', immortal=True)
+ q.raw = lltype.malloc(array_nongc, n=5, flavor='raw', immortal=True)
+ def f1(n):
+ if n == 1:
+ return len(q.gc)
+ else:
+ return len(q.raw)
+ res = self.interpret(f1, [1])
+ assert self.read_barriers == [q]
+ res = self.interpret(f1, [0])
+ assert self.read_barriers == [q]
+
+
+ def test_multiple_reads(self):
+ X = lltype.GcStruct('X', ('foo', lltype.Signed),
+ ('bar', lltype.Signed))
+ x1 = lltype.malloc(X, immortal=True)
+ x1.foo = 6
+ x1.bar = 7
+ x2 = lltype.malloc(X, immortal=True)
+ x2.foo = 81
+ x2.bar = -1
+
+ def f1(n):
+ if n > 1:
+ return x2.foo * x2.bar
+ else:
+ return x1.foo * x1.bar
+
+ res = self.interpret(f1, [4])
+ assert res == -81
+ assert self.read_barriers == [x2]
+
+
+ def test_dont_repeat_read_barrier_after_malloc(self):
+ X = lltype.GcStruct('X', ('foo', lltype.Signed))
+ x1 = lltype.malloc(X, immortal=True, zero=True)
+ def f1(n):
+ t1 = x1.foo
+ lltype.malloc(X)
+ t1 += x1.foo
+ return t1
+
+ self.interpret(f1, [4])
+ assert self.read_barriers == [x1]
+
+ def test_call_external_release_gil(self):
+ X = lltype.GcStruct('X', ('foo', lltype.Signed))
+ def f1(p):
+ register_invoke_around_extcall()
+ x1 = p.foo
+ external_release_gil()
+ x2 = p.foo
+ return x1 * x2
+
+ x = lltype.malloc(X, immortal=True); x.foo = 6
+ res = self.interpret(f1, [x])
+ assert res == 36
+ assert self.read_barriers == [x, x]
+
+ def test_call_external_any_gcobj(self):
+ X = lltype.GcStruct('X', ('foo', lltype.Signed))
+ def f1(p):
+ register_invoke_around_extcall()
+ x1 = p.foo
+ external_any_gcobj()
+ x2 = p.foo
+ return x1 * x2
+
+ x = lltype.malloc(X, immortal=True); x.foo = 6
+ res = self.interpret(f1, [x])
+ assert res == 36
+ assert self.read_barriers == [x]
+
+ def test_call_external_safest(self):
+ X = lltype.GcStruct('X', ('foo', lltype.Signed))
+ def f1(p):
+ register_invoke_around_extcall()
+ x1 = p.foo
+ external_safest()
+ x2 = p.foo
+ return x1 * x2
+
+ x = lltype.malloc(X, immortal=True); x.foo = 6
+ res = self.interpret(f1, [x])
+ assert res == 36
+ assert self.read_barriers == [x]
+
+ def test_simple_loop(self):
+ X = lltype.GcStruct('X', ('foo', lltype.Signed))
+ def f1(x, i):
+ while i > 0:
+ i -= x.foo
+ return i
+ x = lltype.malloc(X, immortal=True); x.foo = 1
+ res = self.interpret(f1, [x, 5])
+ assert res == 0
+ # for now we get this. Later, we could probably optimize it
+ assert self.read_barriers == [x] * 5
+
+
+ def test_read_immutable(self):
+ class Foo:
+ _immutable_ = True
+
+ def f1(n):
+ x = Foo()
+ x.foo = 4
+ llop.debug_stm_flush_barrier(lltype.Void)
+ if n > 1:
+ n = x.foo
+ llop.debug_stm_flush_barrier(lltype.Void)
+ return x.foo + n
+
+ res = self.interpret(f1, [4])
+ assert res == 8
+ assert len(self.read_barriers) == 0
+
+ def test_read_immutable_prebuilt(self):
+ class Foo:
+ _immutable_ = True
+ x1 = Foo()
+ x1.foo = 42
+ x2 = Foo()
+ x2.foo = 81
+
+ def f1(n):
+ if n > 1:
+ return x2.foo
+ else:
+ return x1.foo
+
+ res = self.interpret(f1, [4])
+ assert res == 81
+ assert self.read_barriers == []
+
+ def test_immut_barrier_before_weakref_deref(self):
+ import weakref
+ class Foo:
+ pass
+
+ def f1():
+ x = Foo()
+ w = weakref.ref(x)
+ llop.debug_stm_flush_barrier(lltype.Void)
+ return w()
+
+ self.interpret(f1, [])
+ assert len(self.read_barriers) == 1
+
+
+ def test_transaction_breaking_ops(self):
+ class X:
+ a = 1
+ x = X()
+
+ def f1(f):
+ x.a = f
+ t = x.a # no read barrier
+ llop.stm_commit_if_not_atomic(lltype.Void)
+ t += x.a
+ llop.stm_start_if_not_atomic(lltype.Void)
+ t += x.a
+ llop.stm_transaction_break(lltype.Void)
+ t += x.a
+ llop.stm_enter_callback_call(lltype.Void)
+ t += x.a
+ llop.stm_leave_callback_call(lltype.Void)
+ t += x.a
+ return t
+
+ self.interpret(f1, [1])
+ assert len(self.read_barriers) == 5
+
+
+external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void,
+ _callable=lambda: None,
+ random_effects_on_gcobjs=True,
+ releasegil=True) # GIL is released
+external_any_gcobj = rffi.llexternal('external_any_gcobj', [], lltype.Void,
+ _callable=lambda: None,
+ random_effects_on_gcobjs=True,
+ releasegil=False) # GIL is not released
+external_safest = rffi.llexternal('external_safest', [], lltype.Void,
+ _callable=lambda: None,
+ random_effects_on_gcobjs=False,
+ releasegil=False) # GIL is not released
diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py
--- a/rpython/translator/stm/test/transform_support.py
+++ b/rpython/translator/stm/test/transform_support.py
@@ -68,10 +68,7 @@
stm_ignored = False
def eval(self):
- self.gcptrs_actually_read = []
result = LLFrame.eval(self)
- for x in self.gcptrs_actually_read:
- assert x in self.llinterpreter.tester.read_barriers
return result
def all_stm_ptrs(self):
@@ -83,9 +80,6 @@
def op_stm_read(self, obj):
self.llinterpreter.tester.read_barriers.append(obj)
- def op_stm_write(self, obj):
- self.op_stm_read(obj) # implicitly counts as a read barrier too
-
def op_stm_ignored_start(self):
assert self.stm_ignored == False
self.stm_ignored = True
@@ -95,61 +89,52 @@
self.stm_ignored = False
def op_getfield(self, obj, field):
- if obj._TYPE.TO._gckind == 'gc':
- if obj._TYPE.TO._immutable_field(field):
- if not self.stm_ignored:
- self.gcptrs_actually_read.append(obj)
return LLFrame.op_getfield(self, obj, field)
def op_setfield(self, obj, fieldname, fieldvalue):
- if obj._TYPE.TO._gckind == 'gc':
- T = lltype.typeOf(fieldvalue)
- if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc':
- self.check_category(obj, 'W')
- else:
- self.check_category(obj, 'V')
- # convert R -> Q all other pointers to the same object we can find
- for p in self.all_stm_ptrs():
- if p._category == 'R' and p._T == obj._T and p == obj:
- _stmptr._category.__set__(p, 'Q')
return LLFrame.op_setfield(self, obj, fieldname, fieldvalue)
def op_cast_pointer(self, RESTYPE, obj):
if obj._TYPE.TO._gckind == 'gc':
- cat = self.check_category(obj, None)
p = opimpl.op_cast_pointer(RESTYPE, obj)
- return _stmptr(p, cat)
+ return p
return lltype.cast_pointer(RESTYPE, obj)
op_cast_pointer.need_result_type = True
def op_cast_opaque_ptr(self, RESTYPE, obj):
if obj._TYPE.TO._gckind == 'gc':
- cat = self.check_category(obj, None)
p = lltype.cast_opaque_ptr(RESTYPE, obj)
- return _stmptr(p, cat)
+ return p
return LLFrame.op_cast_opaque_ptr(self, RESTYPE, obj)
op_cast_opaque_ptr.need_result_type = True
def op_malloc(self, obj, flags):
assert flags['flavor'] == 'gc'
- # convert all existing pointers W -> V
- for p in self.all_stm_ptrs():
- if p._category == 'W':
- _stmptr._category.__set__(p, 'V')
p = LLFrame.op_malloc(self, obj, flags)
- ptr2 = _stmptr(p, 'W')
- self.llinterpreter.tester.writemode.add(ptr2._obj)
+ ptr2 = p
return ptr2
def transaction_break(self):
- # convert -> I all other pointers to the same object we can find
- for p in self.all_stm_ptrs():
- if p._category > 'I':
- _stmptr._category.__set__(p, 'I')
+ pass
def op_stm_commit_transaction(self):
self.transaction_break()
+ def op_stm_transaction_break(self):
+ self.transaction_break()
+
+ def op_stm_commit_if_not_atomic(self):
+ self.transaction_break()
+
+ def op_stm_start_if_not_atomic(self):
+ self.transaction_break()
+
+ def op_stm_enter_callback_call(self):
+ self.transaction_break()
+
+ def op_stm_leave_callback_call(self):
+ self.transaction_break()
+
def op_stm_begin_inevitable_transaction(self):
self.transaction_break()
diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py
--- a/rpython/translator/stm/transform.py
+++ b/rpython/translator/stm/transform.py
@@ -1,5 +1,6 @@
from rpython.translator.stm.inevitable import insert_turn_inevitable
from rpython.translator.stm.readbarrier import insert_stm_read_barrier
+from rpython.translator.stm.breakfinder import TransactionBreakAnalyzer
from rpython.translator.c.support import log
@@ -25,8 +26,12 @@
def transform_read_barrier(self):
self.read_barrier_counts = 0
+ self.break_analyzer = TransactionBreakAnalyzer(self.translator)
+
for graph in self.translator.graphs:
insert_stm_read_barrier(self, graph)
+
+ del self.break_analyzer
log.info("%d read barriers inserted" % (self.read_barrier_counts,))
def transform_turn_inevitable(self):
From noreply at buildbot.pypy.org Wed Aug 20 13:02:52 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Wed, 20 Aug 2014 13:02:52 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: simplify by removing the renaming
Message-ID: <20140820110252.738AB1C14FF@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72926:3b0d4669070d
Date: 2014-08-20 11:19 +0200
http://bitbucket.org/pypy/pypy/changeset/3b0d4669070d/
Log: simplify by removing the renaming
diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py
--- a/rpython/translator/stm/readbarrier.py
+++ b/rpython/translator/stm/readbarrier.py
@@ -19,12 +19,6 @@
def is_gc_ptr(T):
return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc'
-class Renaming(object):
- def __init__(self, newvar, category):
- self.newvar = newvar # a Variable or a Constant
- self.TYPE = newvar.concretetype
- self.category = category
-
class BlockTransformer(object):
@@ -93,43 +87,22 @@
def flow_through_block(self):
- def renfetch(v):
- try:
- return renamings[v]
- except KeyError:
- ren = Renaming(v, 'A')
- renamings[v] = ren
- return ren
+ def catfetch(v):
+ return cat_map.setdefault(v, 'A')
def get_category_or_null(v):
# 'v' is an original variable here, or a constant
if isinstance(v, Constant) and not v.value: # a NULL constant
return 'Z'
- if v in renamings:
- return renamings[v].category
+ if v in cat_map:
+ return cat_map[v]
if isinstance(v, Constant):
return 'R'
else:
return 'A'
- def renamings_get(v):
- try:
- ren = renamings[v]
- except KeyError:
- return v # unmodified
- v2 = ren.newvar
- if v2.concretetype == v.concretetype:
- return v2
- v3 = varoftype(v.concretetype)
- newoperations.append(SpaceOperation('cast_pointer', [v2], v3))
- if lltype.castable(ren.TYPE, v3.concretetype) > 0:
- ren.TYPE = v3.concretetype
- return v3
- # note: 'renamings' maps old vars to new vars, but cast_pointers
- # are done lazily. It means that the two vars may not have
- # exactly the same type.
- renamings = {} # {original-var: Renaming(newvar, category)}
+ cat_map = {} # var: category
newoperations = []
stmtransformer = self.stmtransformer
@@ -138,42 +111,38 @@
for v, cat in zip(self.block.inputargs, self.inputargs_category):
if is_gc_ptr(v.concretetype):
assert cat is not None
- renamings[v] = Renaming(v, cat)
+ cat_map[v] = cat
for op in self.block.operations:
#
if (op.opname in ('cast_pointer', 'same_as') and
is_gc_ptr(op.result.concretetype)):
- renamings[op.result] = renfetch(op.args[0])
- continue
+ cat_map[op.result] = catfetch(op.args[0])
+ assert not self.wants_a_barrier.get(op)
#
to = self.wants_a_barrier.get(op)
if to is not None:
- ren = renfetch(op.args[0])
- frm = ren.category
+ var = op.args[0]
+ frm = catfetch(op.args[0])
if needs_barrier(frm, to):
stmtransformer.read_barrier_counts += 1
v_none = varoftype(lltype.Void)
newoperations.append(
- SpaceOperation('stm_read', [ren.newvar], v_none))
- ren.category = to
+ SpaceOperation('stm_read', [var], v_none))
+ cat_map[var] = to
#
- # XXX: from c4: we can probably just append the original op
- newop = SpaceOperation(op.opname,
- [renamings_get(v) for v in op.args],
- op.result)
- newoperations.append(newop)
+ newoperations.append(op)
#
if (stmtransformer.break_analyzer.analyze(op)
or op.opname == 'debug_stm_flush_barrier'):
# this operation can perform a transaction break:
# all pointers are lowered to 'A'
- for ren in renamings.values():
- ren.category = 'A'
+ for v in cat_map.keys():
+ cat_map[v] = 'A'
#
if op.opname in MALLOCS:
- assert op.result not in renamings
- renamings[op.result] = Renaming(op.result, 'R')
+ assert op.result not in cat_map
+ cat_map[op.result] = 'R'
#
if op.opname in ('setfield', 'setarrayitem', 'setinteriorfield',
'raw_store'):
@@ -181,10 +150,10 @@
# ops that need a write barrier also make the var 'R'
if (op.args[-1].concretetype is not lltype.Void
and is_gc_ptr(op.args[0].concretetype)):
- renfetch(op.args[0]).category = 'R'
+ cat_map[op.args[0]] = 'R'
if isinstance(self.block.exitswitch, Variable):
- switchv = renamings_get(self.block.exitswitch)
+ switchv = self.block.exitswitch
else:
switchv = None
blockoperations = newoperations
@@ -198,7 +167,7 @@
cat = None
output_categories.append(cat)
newoperations = []
- newargs = [renamings_get(v) for v in link.args]
+ newargs = link.args
linkoperations.append((newargs, newoperations, output_categories))
#
# Record how we'd like to patch the block, but don't do any
From noreply at buildbot.pypy.org Wed Aug 20 13:02:53 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Wed, 20 Aug 2014 13:02:53 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: another small simplification
Message-ID: <20140820110253.942DC1C14FF@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72927:28c253683fc2
Date: 2014-08-20 11:25 +0200
http://bitbucket.org/pypy/pypy/changeset/28c253683fc2/
Log: another small simplification
diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py
--- a/rpython/translator/stm/readbarrier.py
+++ b/rpython/translator/stm/readbarrier.py
@@ -167,8 +167,7 @@
cat = None
output_categories.append(cat)
newoperations = []
- newargs = link.args
- linkoperations.append((newargs, newoperations, output_categories))
+ linkoperations.append((newoperations, output_categories))
#
# Record how we'd like to patch the block, but don't do any
# patching yet
@@ -179,7 +178,7 @@
(_, _, linkoperations) = self.patch
assert len(linkoperations) == len(self.block.exits)
targetbts = []
- for link, (_, _, output_categories) in zip(self.block.exits,
+ for link, (_, output_categories) in zip(self.block.exits,
linkoperations):
targetblock = link.target
if targetblock not in block_transformers:
@@ -215,9 +214,8 @@
if switchv is not None:
self.block.exitswitch = switchv
assert len(linkoperations) == len(self.block.exits)
- for link, (newargs, newoperations, _) in zip(self.block.exits,
+ for link, (newoperations, _) in zip(self.block.exits,
linkoperations):
- link.args[:] = newargs
if newoperations:
# must put them in a fresh block along the link
annotator = self.stmtransformer.translator.annotator
From noreply at buildbot.pypy.org Wed Aug 20 13:02:54 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Wed, 20 Aug 2014 13:02:54 +0200 (CEST)
Subject: [pypy-commit] pypy stmgc-c7: Merge
Message-ID: <20140820110254.C37C01C14FF@cobra.cs.uni-duesseldorf.de>
Author: Remi Meier
Branch: stmgc-c7
Changeset: r72928:24b0a79addbe
Date: 2014-08-20 13:02 +0200
http://bitbucket.org/pypy/pypy/changeset/24b0a79addbe/
Log: Merge
diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py
--- a/rpython/jit/backend/llsupport/stmrewrite.py
+++ b/rpython/jit/backend/llsupport/stmrewrite.py
@@ -27,6 +27,12 @@
self._do_stm_call('stm_hint_commit_soon', [], None,
op.stm_location)
return
+ # ---------- jump, finish, guard_not_forced_2 ----------
+ if (opnum == rop.JUMP or opnum == rop.FINISH
+ or opnum == rop.GUARD_NOT_FORCED_2):
+ self.add_dummy_allocation()
+ self.newops.append(op)
+ return
# ---------- pure operations, guards ----------
if op.is_always_pure() or op.is_guard() or op.is_ovf():
self.newops.append(op)
@@ -84,11 +90,6 @@
):
self.newops.append(op)
return
- # ---------- jump, finish ----------
- if opnum == rop.JUMP or opnum == rop.FINISH:
- self.add_dummy_allocation()
- self.newops.append(op)
- return
# ---------- fall-back ----------
# Check that none of the ops handled here can collect.
# This is not done by the fallback here
diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py
--- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py
@@ -1322,3 +1322,18 @@
$DUMMYALLOC
jump(i1)
""")
+
+ def test_dummy_alloc_is_before_guard_not_forced_2(self):
+ self.check_rewrite("""
+ []
+ escape()
+ guard_not_forced_2() []
+ finish()
+ """, """
+ []
+ $INEV
+ escape()
+ $DUMMYALLOC
+ guard_not_forced_2() []
+ finish()
+ """)
From noreply at buildbot.pypy.org Wed Aug 20 13:31:00 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 20 Aug 2014 13:31:00 +0200 (CEST)
Subject: [pypy-commit] pypy default: Rename Block.get_graph() to make it
clear that it's a slow method, used
Message-ID: <20140820113100.722F01C148A@cobra.cs.uni-duesseldorf.de>
Author: Armin Rigo
Branch:
Changeset: r72929:b059fb4e325c
Date: 2014-08-20 13:30 +0200
http://bitbucket.org/pypy/pypy/changeset/b059fb4e325c/
Log: Rename Block.get_graph() to make it clear that it's a slow method,
used only (so far) by the pygame viewer
diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py
--- a/rpython/flowspace/model.py
+++ b/rpython/flowspace/model.py
@@ -252,7 +252,7 @@
from rpython.translator.tool.graphpage import try_show
try_show(self)
- def get_graph(self):
+ def _slowly_get_graph(self):
import gc
pending = [self] # pending blocks
seen = {self: True, None: True}
diff --git a/rpython/translator/tool/graphpage.py b/rpython/translator/tool/graphpage.py
--- a/rpython/translator/tool/graphpage.py
+++ b/rpython/translator/tool/graphpage.py
@@ -409,7 +409,7 @@
elif isinstance(obj, Link):
try_show(obj.prevblock)
elif isinstance(obj, Block):
- graph = obj.get_graph()
+ graph = obj._slowly_get_graph()
if isinstance(graph, FunctionGraph):
graph.show()
return
From noreply at buildbot.pypy.org Wed Aug 20 15:09:03 2014
From: noreply at buildbot.pypy.org (fijal)
Date: Wed, 20 Aug 2014 15:09:03 +0200 (CEST)
Subject: [pypy-commit] pypy use-file-star-for-file: fix the case of
llstr(char)
Message-ID: <20140820130903.D4F661C1482@cobra.cs.uni-duesseldorf.de>
Author: Maciej Fijalkowski
Branch: use-file-star-for-file
Changeset: r72930:311385061a9b
Date: 2014-08-20 14:38 +0200
http://bitbucket.org/pypy/pypy/changeset/311385061a9b/
Log: fix the case of llstr(char)
diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py
--- a/rpython/rtyper/annlowlevel.py
+++ b/rpython/rtyper/annlowlevel.py
@@ -422,11 +422,13 @@
def specialize_call(self, hop):
hop.exception_cannot_occur()
- assert hop.args_r[0].lowleveltype == hop.r_result.lowleveltype
v_ll_str, = hop.inputargs(*hop.args_r)
- return hop.genop('same_as', [v_ll_str],
- resulttype = hop.r_result.lowleveltype)
-
+ if hop.args_r[0].lowleveltype == hop.r_result.lowleveltype:
+ return hop.genop('same_as', [v_ll_str],
+ resulttype = hop.r_result.lowleveltype)
+ else:
+ return hop.gendirectcall(hop.args_r[0].ll.ll_chr2str, v_ll_str)
+
return hlstr, llstr
hlstr, llstr = make_string_entries(str)
diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py
--- a/rpython/rtyper/test/test_annlowlevel.py
+++ b/rpython/rtyper/test/test_annlowlevel.py
@@ -34,6 +34,14 @@
res = self.interpret(f, [self.string_to_ll("abc")])
assert res == 3
+ def test_llstr_const_char(self):
+ def f(arg):
+ s = llstr(hlstr(arg)[0])
+ return len(s.chars)
+
+ res = self.interpret(f, [self.string_to_ll("abc")])
+ assert res == 1
+
def test_hlunicode(self):
s = mallocunicode(3)
s.chars[0] = u"a"
From noreply at buildbot.pypy.org Wed Aug 20 15:09:05 2014
From: noreply at buildbot.pypy.org (fijal)
Date: Wed, 20 Aug 2014 15:09:05 +0200 (CEST)
Subject: [pypy-commit] pypy use-file-star-for-file: (arigo) simplify
Message-ID: <20140820130905.07FBE1C1482@cobra.cs.uni-duesseldorf.de>
Author: Maciej Fijalkowski